Diffstat (limited to 'runtime/oat')
-rw-r--r--  runtime/oat/runtime/argument_visitor.h | 249
-rw-r--r--  runtime/oat/runtime/arm/context_arm.cc | 103
-rw-r--r--  runtime/oat/runtime/arm/context_arm.h | 67
-rw-r--r--  runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc | 237
-rw-r--r--  runtime/oat/runtime/arm/runtime_support_arm.S | 1413
-rw-r--r--  runtime/oat/runtime/callee_save_frame.h | 41
-rw-r--r--  runtime/oat/runtime/context.cc | 41
-rw-r--r--  runtime/oat/runtime/context.h | 70
-rw-r--r--  runtime/oat/runtime/mips/context_mips.cc | 102
-rw-r--r--  runtime/oat/runtime/mips/context_mips.h | 64
-rw-r--r--  runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc | 238
-rw-r--r--  runtime/oat/runtime/mips/runtime_support_mips.S | 1187
-rw-r--r--  runtime/oat/runtime/oat_support_entrypoints.h | 177
-rw-r--r--  runtime/oat/runtime/support_alloc.cc | 79
-rw-r--r--  runtime/oat/runtime/support_cast.cc | 68
-rw-r--r--  runtime/oat/runtime/support_deoptimize.cc | 38
-rw-r--r--  runtime/oat/runtime/support_dexcache.cc | 68
-rw-r--r--  runtime/oat/runtime/support_field.cc | 273
-rw-r--r--  runtime/oat/runtime/support_fillarray.cc | 63
-rw-r--r--  runtime/oat/runtime/support_instrumentation.cc | 65
-rw-r--r--  runtime/oat/runtime/support_interpreter.cc | 124
-rw-r--r--  runtime/oat/runtime/support_invoke.cc | 226
-rw-r--r--  runtime/oat/runtime/support_jni.cc | 171
-rw-r--r--  runtime/oat/runtime/support_locks.cc | 42
-rw-r--r--  runtime/oat/runtime/support_math.cc | 77
-rw-r--r--  runtime/oat/runtime/support_proxy.cc | 210
-rw-r--r--  runtime/oat/runtime/support_stubs.cc | 438
-rw-r--r--  runtime/oat/runtime/support_thread.cc | 38
-rw-r--r--  runtime/oat/runtime/support_throw.cc | 98
-rw-r--r--  runtime/oat/runtime/x86/context_x86.cc | 97
-rw-r--r--  runtime/oat/runtime/x86/context_x86.h | 67
-rw-r--r--  runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc | 221
-rw-r--r--  runtime/oat/runtime/x86/runtime_support_x86.S | 1211
-rw-r--r--  runtime/oat/utils/arm/assembler_arm.cc | 1895
-rw-r--r--  runtime/oat/utils/arm/assembler_arm.h | 659
-rw-r--r--  runtime/oat/utils/arm/managed_register_arm.cc | 113
-rw-r--r--  runtime/oat/utils/arm/managed_register_arm.h | 274
-rw-r--r--  runtime/oat/utils/arm/managed_register_arm_test.cc | 767
-rw-r--r--  runtime/oat/utils/assembler.cc | 119
-rw-r--r--  runtime/oat/utils/assembler.h | 459
-rw-r--r--  runtime/oat/utils/managed_register.h | 72
-rw-r--r--  runtime/oat/utils/mips/assembler_mips.cc | 1023
-rw-r--r--  runtime/oat/utils/mips/assembler_mips.h | 513
-rw-r--r--  runtime/oat/utils/mips/managed_register_mips.cc | 114
-rw-r--r--  runtime/oat/utils/mips/managed_register_mips.h | 228
-rw-r--r--  runtime/oat/utils/x86/assembler_x86.cc | 1859
-rw-r--r--  runtime/oat/utils/x86/assembler_x86.h | 655
-rw-r--r--  runtime/oat/utils/x86/assembler_x86_test.cc | 32
-rw-r--r--  runtime/oat/utils/x86/managed_register_x86.cc | 128
-rw-r--r--  runtime/oat/utils/x86/managed_register_x86.h | 218
-rw-r--r--  runtime/oat/utils/x86/managed_register_x86_test.cc | 359
51 files changed, 17150 insertions, 0 deletions
diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h
new file mode 100644
index 0000000..4ab05b9
--- /dev/null
+++ b/runtime/oat/runtime/argument_visitor.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_ARGUMENT_VISITOR_H_
+#define ART_SRC_OAT_RUNTIME_ARGUMENT_VISITOR_H_
+
+#include "object_utils.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class PortableArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define PORTABLE_STACK_ARG_SKIP 0
+#elif defined(__mips__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define PORTABLE_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define PORTABLE_STACK_ARG_SKIP 4
+#else
+#error "Unsupported architecture"
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define PORTABLE_STACK_ARG_SKIP 0
+#endif
+
+ PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ caller_mh_(caller_mh),
+ args_in_regs_(ComputeArgsInRegs(caller_mh)),
+ num_params_(caller_mh.NumArgs()),
+ reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ + PORTABLE_STACK_ARG_SKIP),
+ cur_args_(reg_args_),
+ cur_arg_index_(0),
+ param_index_(0) {
+ }
+
+ virtual ~PortableArgumentVisitor() {}
+
+ virtual void Visit() = 0;
+
+ bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamAReference(param_index_);
+ }
+
+ bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamALongOrDouble(param_index_);
+ }
+
+ Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.GetParamPrimitiveType(param_index_);
+ }
+
+ byte* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * kPointerSize);
+ }
+
+ void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
+ break;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ cur_args_ = stack_args_;
+ cur_arg_index_ = 0;
+ while (param_index_ < num_params_) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
+ cur_arg_index_++;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ }
+
+ private:
+ static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if (defined(__i386__))
+ return 0;
+#else
+ size_t args_in_regs = 0;
+ size_t num_params = mh.NumArgs();
+ for (size_t i = 0; i < num_params; i++) {
+ args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+ return args_in_regs;
+#endif
+ }
+ MethodHelper& caller_mh_;
+ const size_t args_in_regs_;
+ const size_t num_params_;
+ byte* const reg_args_;
+ byte* const stack_args_;
+ byte* cur_args_;
+ size_t cur_arg_index_;
+ size_t param_index_;
+};
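+
+// A minimal usage sketch (illustrative only; the subclass name below is
+// hypothetical, not part of this change): callers subclass the visitor,
+// override Visit() to handle the argument found at GetParamAddress(), and
+// drive the walk with VisitArguments().
+//
+//   class PrintingArgumentVisitor : public PortableArgumentVisitor {
+//    public:
+//     PrintingArgumentVisitor(MethodHelper& mh, mirror::AbstractMethod** sp)
+//         : PortableArgumentVisitor(mh, sp) {}
+//     virtual void Visit() {
+//       LOG(INFO) << "param " << GetParamPrimitiveType()
+//                 << " at " << static_cast<void*>(GetParamAddress());
+//     }
+//   };
+//
+//   PrintingArgumentVisitor visitor(caller_mh, sp);
+//   visitor.VisitArguments();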
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class QuickArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__mips__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define QUICK_STACK_ARG_SKIP 16
+#else
+#error "Unsupported architecture"
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define QUICK_STACK_ARG_SKIP 0
+#endif
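+
+// Note: these values must match the kRefsAndArgs callee save frame laid out by
+// SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME in runtime_support_<arch>.S; on ARM the
+// frame is 48 bytes with r1 saved at offset 8.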
+
+ QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ caller_mh_(caller_mh),
+ args_in_regs_(ComputeArgsInRegs(caller_mh)),
+ num_params_(caller_mh.NumArgs()),
+ reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ + QUICK_STACK_ARG_SKIP),
+ cur_args_(reg_args_),
+ cur_arg_index_(0),
+ param_index_(0),
+ is_split_long_or_double_(false) {
+ }
+
+ virtual ~QuickArgumentVisitor() {}
+
+ virtual void Visit() = 0;
+
+ bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamAReference(param_index_);
+ }
+
+ bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamALongOrDouble(param_index_);
+ }
+
+ Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.GetParamPrimitiveType(param_index_);
+ }
+
+ byte* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * kPointerSize);
+ }
+
+ bool IsSplitLongOrDouble() const {
+ return is_split_long_or_double_;
+ }
+
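+  // Reassembles a 64-bit argument whose low word was passed in the last
+  // argument register and whose high word is the first stack argument slot.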
+ uint64_t ReadSplitLongParam() const {
+ DCHECK(IsSplitLongOrDouble());
+ uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
+ uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
+ return (low_half & 0xffffffffULL) | (high_half << 32);
+  }
+
+ void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+ is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ cur_args_ = stack_args_;
+ cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
+ is_split_long_or_double_ = false;
+ while (param_index_ < num_params_) {
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ }
+
+ private:
+ static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t args_in_regs = 0;
+ size_t num_params = mh.NumArgs();
+ for (size_t i = 0; i < num_params; i++) {
+ args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+ return args_in_regs;
+ }
+ MethodHelper& caller_mh_;
+ const size_t args_in_regs_;
+ const size_t num_params_;
+ byte* const reg_args_;
+ byte* const stack_args_;
+ byte* cur_args_;
+ size_t cur_arg_index_;
+ size_t param_index_;
+  // Does a 64-bit parameter straddle the register and stack arguments?
+ bool is_split_long_or_double_;
+};
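+
+// Worked example (ARM): for a virtual method with signature (int, long), the
+// visitor sees this in r1 and the int in r2; the long's low word lands in r3
+// (the last argument register) and its high word in the first stack slot --
+// the split case that ReadSplitLongParam() reassembles.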
+
+}  // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat/runtime/arm/context_arm.cc b/runtime/oat/runtime/arm/context_arm.cc
new file mode 100644
index 0000000..4e42e94
--- /dev/null
+++ b/runtime/oat/runtime/arm/context_arm.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_arm.h"
+
+#include "mirror/abstract_method.h"
+#include "mirror/object-inl.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+namespace arm {
+
+static const uint32_t gZero = 0;
+
+void ArmContext::Reset() {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ gprs_[i] = NULL;
+ }
+ for (size_t i = 0; i < kNumberOfSRegisters; i++) {
+ fprs_[i] = NULL;
+ }
+ gprs_[SP] = &sp_;
+ gprs_[PC] = &pc_;
+ // Initialize registers with easy to spot debug values.
+ sp_ = ArmContext::kBadGprBase + SP;
+ pc_ = ArmContext::kBadGprBase + PC;
+}
+
+void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
+ mirror::AbstractMethod* method = fr.GetMethod();
+ uint32_t core_spills = method->GetCoreSpillMask();
+  uint32_t fp_spills = method->GetFpSpillMask();
+  size_t spill_count = __builtin_popcount(core_spills);
+  size_t fp_spill_count = __builtin_popcount(fp_spills);
+ size_t frame_size = method->GetFrameSizeInBytes();
+ if (spill_count > 0) {
+    // The lowest-numbered spill is farthest away; walk the registers and fill them into the context.
+ int j = 1;
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ if (((core_spills >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+ if (fp_spill_count > 0) {
+    // The lowest-numbered spill is farthest away; walk the registers and fill them into the context.
+ int j = 1;
+ for (size_t i = 0; i < kNumberOfSRegisters; i++) {
+      if (((fp_spills >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+}
+
+void ArmContext::SetGPR(uint32_t reg, uintptr_t value) {
+ DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
+  DCHECK_NE(gprs_[reg], &gZero);  // Can't overwrite this static value since it is never reset.
+ DCHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
+void ArmContext::SmashCallerSaves() {
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[R0] = const_cast<uint32_t*>(&gZero);
+ gprs_[R1] = const_cast<uint32_t*>(&gZero);
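+  // R2 and R3 are left NULL so that DoLongJump (below) fills them with
+  // kBadGprBase debug values rather than live data.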
+ gprs_[R2] = NULL;
+ gprs_[R3] = NULL;
+}
+
+extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*);
+
+void ArmContext::DoLongJump() {
+ uintptr_t gprs[16];
+ uint32_t fprs[32];
+ for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
+ gprs[i] = gprs_[i] != NULL ? *gprs_[i] : ArmContext::kBadGprBase + i;
+ }
+ for (size_t i = 0; i < kNumberOfSRegisters; ++i) {
+ fprs[i] = fprs_[i] != NULL ? *fprs_[i] : ArmContext::kBadGprBase + i;
+ }
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]);
+ art_quick_do_long_jump(gprs, fprs);
+}
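+
+// Typical use (a sketch -- the real call sites live in the exception delivery
+// and deoptimization code, not in this file): walk the stack to the target
+// frame, reload its callee saves, then point SP/PC at the handler and jump.
+//
+//   ArmContext context;
+//   context.FillCalleeSaves(visitor);  // visitor sits at the catch frame
+//   context.SetSP(handler_sp);         // hypothetical values
+//   context.SetPC(handler_pc);
+//   context.DoLongJump();              // never returns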
+
+} // namespace arm
+} // namespace art
diff --git a/runtime/oat/runtime/arm/context_arm.h b/runtime/oat/runtime/arm/context_arm.h
new file mode 100644
index 0000000..ec1d4cb
--- /dev/null
+++ b/runtime/oat/runtime/arm/context_arm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+#define ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+
+#include "locks.h"
+#include "constants_arm.h"
+#include "oat/runtime/context.h"
+
+namespace art {
+namespace arm {
+
+class ArmContext : public Context {
+ public:
+ ArmContext() {
+ Reset();
+ }
+
+ virtual ~ArmContext() {}
+
+ virtual void Reset();
+
+ virtual void FillCalleeSaves(const StackVisitor& fr);
+
+ virtual void SetSP(uintptr_t new_sp) {
+ SetGPR(SP, new_sp);
+ }
+
+ virtual void SetPC(uintptr_t new_pc) {
+ SetGPR(PC, new_pc);
+ }
+
+ virtual uintptr_t GetGPR(uint32_t reg) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
+ return *gprs_[reg];
+ }
+
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
+ virtual void SmashCallerSaves();
+ virtual void DoLongJump();
+
+ private:
+ // Pointers to register locations, initialized to NULL or the specific registers below.
+ uintptr_t* gprs_[kNumberOfCoreRegisters];
+ uint32_t* fprs_[kNumberOfSRegisters];
+ // Hold values for sp and pc if they are not located within a stack frame.
+ uintptr_t sp_, pc_;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
diff --git a/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc b/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc
new file mode 100644
index 0000000..2e9453c
--- /dev/null
+++ b/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "runtime_support.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Exception entrypoints.
+extern "C" void* GetAndClearException(Thread*);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+
+// Math conversions.
+extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT
+extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT
+extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT
+extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE
+
+// Single-precision FP arithmetic.
+extern "C" float fmodf(float a, float b);  // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetic.
+extern "C" double fmod(double a, double b);  // REM_DOUBLE[_2ADDR]
+
+// Integer arithmetic.
+extern "C" int __aeabi_idivmod(int32_t, int32_t);  // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16]
+
+// Long long arithmetic - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
+extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(EntryPoints* points) {
+ // Alloc
+ points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ points->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ points->pSet32Instance = art_quick_set32_instance_from_code;
+ points->pSet32Static = art_quick_set32_static_from_code;
+ points->pSet64Instance = art_quick_set64_instance_from_code;
+ points->pSet64Static = art_quick_set64_static_from_code;
+ points->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ points->pSetObjStatic = art_quick_set_obj_static_from_code;
+ points->pGet32Instance = art_quick_get32_instance_from_code;
+ points->pGet64Instance = art_quick_get64_instance_from_code;
+ points->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ points->pGet32Static = art_quick_get32_static_from_code;
+ points->pGet64Static = art_quick_get64_static_from_code;
+ points->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ points->pJniMethodStart = JniMethodStart;
+ points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ points->pJniMethodEnd = JniMethodEnd;
+ points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ points->pJniMethodEndWithReference = JniMethodEndWithReference;
+ points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ points->pLockObjectFromCode = art_quick_lock_object_from_code;
+ points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ points->pCmpgDouble = CmpgDouble;
+ points->pCmpgFloat = CmpgFloat;
+ points->pCmplDouble = CmplDouble;
+ points->pCmplFloat = CmplFloat;
+ points->pFmod = fmod;
+ points->pSqrt = sqrt;
+ points->pL2d = __aeabi_l2d;
+ points->pFmodf = fmodf;
+ points->pL2f = __aeabi_l2f;
+ points->pD2iz = __aeabi_d2iz;
+ points->pF2iz = __aeabi_f2iz;
+ points->pIdivmod = __aeabi_idivmod;
+ points->pD2l = art_d2l;
+ points->pF2l = art_f2l;
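+  // __aeabi_ldivmod returns the quotient in r0:r1 and the remainder in
+  // r2:r3, so the one AEABI routine backs both entrypoints.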
+ points->pLdiv = __aeabi_ldivmod;
+ points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3
+ points->pLmul = art_quick_mul_long;
+ points->pShlLong = art_quick_shl_long;
+ points->pShrLong = art_quick_shr_long;
+ points->pUshrLong = art_quick_ushr_long;
+
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ points->pIndexOf = art_quick_indexof;
+ points->pMemcmp16 = __memcmp16;
+ points->pStringCompareTo = art_quick_string_compareto;
+ points->pMemcpy = memcpy;
+
+ // Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ points->pCheckSuspendFromCode = CheckSuspendFromCode;
+ points->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ points->pDeliverException = art_quick_deliver_exception_from_code;
+ points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/arm/runtime_support_arm.S b/runtime/oat/runtime/arm/runtime_support_arm.S
new file mode 100644
index 0000000..f19e8ba
--- /dev/null
+++ b/runtime/oat/runtime/arm/runtime_support_arm.S
@@ -0,0 +1,1413 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support.h"
+
+ /* Deliver the given exception */
+ .extern artDeliverExceptionFromCode
+ /* Deliver an exception pending on a thread */
+ .extern artDeliverPendingException
+
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+.endm
+
+.macro END name
+ .fnend
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ push {r4-r11, lr} @ 9 words of callee saves
+ .save {r4-r11, lr}
+ .cfi_adjust_cfa_offset 36
+ .cfi_rel_offset r4, 0
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset r7, 12
+ .cfi_rel_offset r8, 16
+ .cfi_rel_offset r9, 20
+ .cfi_rel_offset r10, 24
+ .cfi_rel_offset r11, 28
+ .cfi_rel_offset lr, 32
+ vpush {s0-s31}
+ .pad #128
+ .cfi_adjust_cfa_offset 128
+ sub sp, #12 @ 3 words of space, bottom word will hold Method*
+ .pad #12
+ .cfi_adjust_cfa_offset 12
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+ */
+.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ push {r5-r8, r10-r11, lr} @ 7 words of callee saves
+ .save {r5-r8, r10-r11, lr}
+ .cfi_adjust_cfa_offset 28
+ .cfi_rel_offset r5, 0
+ .cfi_rel_offset r6, 4
+ .cfi_rel_offset r7, 8
+ .cfi_rel_offset r8, 12
+ .cfi_rel_offset r10, 16
+ .cfi_rel_offset r11, 20
+ .cfi_rel_offset lr, 24
+ sub sp, #4 @ bottom word will hold Method*
+ .pad #4
+ .cfi_adjust_cfa_offset 4
+.endm
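+
+    /*
+     * Frame layout created by SETUP_REF_ONLY_CALLEE_SAVE_FRAME, derived from
+     * the pushes above (32 bytes total):
+     *   [sp, #0]  slot for Method*
+     *   [sp, #4]  r5   [sp, #8]  r6    [sp, #12] r7   [sp, #16] r8
+     *   [sp, #20] r10  [sp, #24] r11   [sp, #28] lr
+     */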
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ ldr lr, [sp, #28] @ restore lr for return
+ add sp, #32 @ unwind stack
+ .cfi_adjust_cfa_offset -32
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ ldr lr, [sp, #28] @ restore lr for return
+ add sp, #32 @ unwind stack
+ .cfi_adjust_cfa_offset -32
+ bx lr @ return
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ */
+.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
+ .save {r1-r3, r5-r8, r10-r11, lr}
+ .cfi_adjust_cfa_offset 40
+ .cfi_rel_offset r1, 0
+ .cfi_rel_offset r2, 4
+ .cfi_rel_offset r3, 8
+ .cfi_rel_offset r5, 12
+ .cfi_rel_offset r6, 16
+ .cfi_rel_offset r7, 20
+ .cfi_rel_offset r8, 24
+ .cfi_rel_offset r10, 28
+ .cfi_rel_offset r11, 32
+ .cfi_rel_offset lr, 36
+ sub sp, #8 @ 2 words of space, bottom word will hold Method*
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+.endm
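+
+    /*
+     * Frame layout created by SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME, derived
+     * from the pushes above (48 bytes total):
+     *   [sp, #0]  slot for Method*  [sp, #4]  spare word
+     *   [sp, #8]  r1   [sp, #12] r2   [sp, #16] r3
+     *   [sp, #20] r5   [sp, #24] r6   [sp, #28] r7   [sp, #32] r8
+     *   [sp, #36] r10  [sp, #40] r11  [sp, #44] lr
+     */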
+
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ ldr r1, [sp, #8] @ restore non-callee save r1
+ ldrd r2, [sp, #12] @ restore non-callee saves r2-r3
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ rewind sp
+ .cfi_adjust_cfa_offset -48
+.endm
+
+ /*
+     * Macro that sets up a call through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+ */
+.macro DELIVER_PENDING_EXCEPTION
+ .fnend
+ .fnstart
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME @ save callee saves for throw
+ mov r0, r9 @ pass Thread::Current
+ mov r1, sp @ pass SP
+ b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*, SP)
+.endm
+
+.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  @ save all registers as basis for long jump context
+ mov r0, r9 @ pass Thread::Current
+ mov r1, sp @ pass SP
+ b \cxx_name @ \cxx_name(Thread*, SP)
+END \c_name
+.endm
+
+.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  @ save all registers as basis for long jump context
+ mov r1, r9 @ pass Thread::Current
+ mov r2, sp @ pass SP
+ b \cxx_name @ \cxx_name(Thread*, SP)
+END \c_name
+.endm
+
+.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  @ save all registers as basis for long jump context
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ b \cxx_name @ \cxx_name(Thread*, SP)
+END \c_name
+.endm
+
+ /*
+ * Called by managed code, saves callee saves and then calls artThrowException
+ * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArithmeticException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
+ * index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
+ *
+ * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
+ * of the target Method* in r0 and method->code_ in r1.
+ *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
+ * pointing back to the original caller.
+ */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME @ save callee saves in case allocation triggers GC
+ ldr r2, [sp, #48] @ pass caller Method*
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl \cxx_name @ (method_idx, this, caller, Thread*, SP)
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ mov r12, r1 @ save Method*->code_
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ did we find the target?
+ bxne r12 @ tail call to target if so
+ DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * r0 = method pointer
+ * r1 = argument array or NULL for no argument methods
+ * r2 = size of argument array in bytes
+ * r3 = (managed) thread pointer
+ * [sp] = JValue* result
+ * [sp + 4] = result type char
+ */
+ENTRY art_portable_invoke_stub
+ push {r0, r4, r5, r9, r11, lr} @ spill regs
+ .save {r0, r4, r5, r9, r11, lr}
+ .pad #24
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset r11, 16
+ .cfi_rel_offset lr, 20
+ mov r11, sp @ save the stack pointer
+ .cfi_def_cfa_register r11
+ mov r9, r3 @ move managed thread pointer into r9
+ mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
+ add r5, r2, #16 @ create space for method pointer in frame
+ and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
+ sub sp, r5 @ reserve stack space for argument array
+ add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
+ bl memcpy @ memcpy (dest, src, bytes)
+ ldr r0, [r11] @ restore method*
+ ldr r1, [sp, #4] @ copy arg value for r1
+ ldr r2, [sp, #8] @ copy arg value for r2
+ ldr r3, [sp, #12] @ copy arg value for r3
+ mov ip, #0 @ set ip to 0
+ str ip, [sp] @ store NULL for method* at bottom of frame
+ add sp, #16 @ first 4 args are not passed on stack for portable
+ ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ blx ip @ call the method
+ mov sp, r11 @ restore the stack pointer
+ ldr ip, [sp, #24] @ load the result pointer
+ strd r0, [ip] @ store r0/r1 into result pointer
+ pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
+ .cfi_adjust_cfa_offset -24
+ bx lr
+END art_portable_invoke_stub
+
+ /*
+ * Quick invocation stub.
+ * On entry:
+ * r0 = method pointer
+ * r1 = argument array or NULL for no argument methods
+ * r2 = size of argument array in bytes
+ * r3 = (managed) thread pointer
+ * [sp] = JValue* result
+ * [sp + 4] = result type char
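+     *
+     * The body matches art_portable_invoke_stub above, except that the copied
+     * argument array is kept on the stack: quick code receives the first
+     * arguments both in r1-r3 and in their stack slots (compare
+     * QUICK_STACK_ARG_SKIP in argument_visitor.h).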
+ */
+ENTRY art_quick_invoke_stub
+ push {r0, r4, r5, r9, r11, lr} @ spill regs
+ .save {r0, r4, r5, r9, r11, lr}
+ .pad #24
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset r11, 16
+ .cfi_rel_offset lr, 20
+ mov r11, sp @ save the stack pointer
+ .cfi_def_cfa_register r11
+ mov r9, r3 @ move managed thread pointer into r9
+ mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
+ add r5, r2, #16 @ create space for method pointer in frame
+ and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
+ sub sp, r5 @ reserve stack space for argument array
+ add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
+ bl memcpy @ memcpy (dest, src, bytes)
+ ldr r0, [r11] @ restore method*
+ ldr r1, [sp, #4] @ copy arg value for r1
+ ldr r2, [sp, #8] @ copy arg value for r2
+ ldr r3, [sp, #12] @ copy arg value for r3
+ mov ip, #0 @ set ip to 0
+ str ip, [sp] @ store NULL for method* at bottom of frame
+ ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ blx ip @ call the method
+ mov sp, r11 @ restore the stack pointer
+ ldr ip, [sp, #24] @ load the result pointer
+ strd r0, [ip] @ store r0/r1 into result pointer
+ pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
+ .cfi_adjust_cfa_offset -24
+ bx lr
+END art_quick_invoke_stub
+
+ /*
+ * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
+ */
+ENTRY art_quick_do_long_jump
+ vldm r1, {s0-s31} @ load all fprs from argument fprs_
+ ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15)
+ add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3
+ ldm r0, {r3-r14} @ load remaining gprs from argument gprs_
+ mov r0, #0 @ clear result registers r0 and r1
+ mov r1, #0
+ bx r2 @ do long jump
+END art_quick_do_long_jump
+
+ /*
+     * Entry point for native methods when JNI bug compatibility is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ @ save registers that may contain arguments and LR that will be crushed by a call
+ push {r0-r3, lr}
+ .save {r0-r3, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ sub sp, #12 @ 3 words of space for alignment
+ mov r0, r9 @ pass Thread::Current
+ mov r1, sp @ pass SP
+ bl artWorkAroundAppJniBugs @ (Thread*, SP)
+ add sp, #12 @ rewind stack
+ mov r12, r0 @ save target address
+ pop {r0-r3, lr} @ restore possibly modified argument registers
+ .cfi_adjust_cfa_offset -16
+ bx r12 @ tail call into JNI routine
+END art_quick_work_around_app_jni_bugs
+
+ /*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
+ * failure.
+ */
+ .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artHandleFillArrayDataFromCode @ (Array*, const DexFile::Payload*, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success?
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_handle_fill_data_from_code
+
+ /*
+ * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+ */
+ .extern artLockObjectFromCode
+ENTRY art_quick_lock_object_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case we block
+ mov r1, r9 @ pass Thread::Current
+ mov r2, sp @ pass SP
+ bl artLockObjectFromCode @ (Object* obj, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_lock_object_from_code
+
+ /*
+ * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+ */
+ .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC
+ mov r1, r9 @ pass Thread::Current
+ mov r2, sp @ pass SP
+ bl artUnlockObjectFromCode @ (Object* obj, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success?
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_unlock_object_from_code
+
+ /*
+ * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ */
+ .extern artCheckCastFromCode
+ENTRY art_quick_check_cast_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artCheckCastFromCode @ (Class* a, Class* b, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success?
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_check_cast_from_code
+
+ /*
+ * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
+ * failure.
+ */
+ .extern artCanPutArrayElementFromCode
+ENTRY art_quick_can_put_array_element_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artCanPutArrayElementFromCode @ (Object* element, Class* array_class, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success?
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_can_put_array_element_from_code
+
+ /*
+     * Entry from managed code when static storage is uninitialized; this stub will run the class
+     * initializer and deliver an exception on error. On success the static storage base is
+ * returned.
+ */
+ .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ bl artInitializeStaticStorageFromCode
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_initialize_static_storage_from_code
+
+ /*
+     * Entry from managed code when the dex cache misses for a type_idx.
+ */
+ .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ bl artInitializeTypeFromCode
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_initialize_type_from_code
+
+ /*
+     * Entry from managed code when a type_idx needs to be checked for access and the dex cache
+     * may also miss.
+ */
+ .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+ bl artInitializeTypeAndVerifyAccessFromCode
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_initialize_type_and_verify_access_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load a 32-bit primitive value.
+ */
+ .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r1, [sp, #32] @ pass referrer
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artGet32StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get32_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load a 64-bit primitive value.
+ */
+ .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r1, [sp, #32] @ pass referrer
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get64_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load an object reference.
+ */
+ .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r1, [sp, #32] @ pass referrer
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artGetObjStaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get_obj_static_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+ */
+ .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r2, [sp, #32] @ pass referrer
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+    str r12, [sp, #-16]!  @ expand the frame and pass SP
+    .pad #16
+    .cfi_adjust_cfa_offset 16
+    bl artGet32InstanceFromCode  @ (field_idx, Object*, referrer, Thread*, SP)
+    add sp, #16  @ strip the extra frame
+    .cfi_adjust_cfa_offset -16
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get32_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+ */
+ .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r2, [sp, #32] @ pass referrer
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP)
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get64_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load an object reference.
+ */
+ .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r2, [sp, #32] @ pass referrer
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl artGetObjInstanceFromCode @ (field_idx, Object*, referrer, Thread*, SP)
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_get_obj_instance_from_code
+
+ /*
+ * Called by managed code to resolve a static field and store a 32-bit primitive value.
+ */
+ .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r2, [sp, #32] @ pass referrer
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl artSet32StaticFromCode @ (field_idx, new_val, referrer, Thread*, SP)
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set32_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and store a 64-bit primitive value.
+ * On entry r0 holds field index, r1:r2 hold new_val
+ */
+ .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r2 @ pass one half of wide argument
+ mov r2, r1 @ pass other half of wide argument
+ ldr r1, [sp, #32] @ pass referrer
+ mov r12, sp @ save SP
+ sub sp, #8 @ grow frame for alignment with stack args
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ push {r9, r12} @ pass Thread::Current and SP
+ .save {r9, r12}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r9, 0
+ bl artSet64StaticFromCode @ (field_idx, referrer, new_val, Thread*, SP)
+ add sp, #16 @ release out args
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set64_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and store an object reference.
+ */
+ .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r2, [sp, #32] @ pass referrer
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl artSetObjStaticFromCode @ (field_idx, new_val, referrer, Thread*, SP)
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set_obj_static_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+ */
+ .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r3, [sp, #32] @ pass referrer
+ mov r12, sp @ save SP
+ sub sp, #8 @ grow frame for alignment with stack args
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ push {r9, r12} @ pass Thread::Current and SP
+ .save {r9, r12}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r9, 0
+ .cfi_rel_offset r12, 4
+ bl artSet32InstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+ add sp, #16 @ release out args
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set32_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ */
+    .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r12, sp @ save SP
+ sub sp, #8 @ grow frame for alignment with stack args
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ push {r9, r12} @ pass Thread::Current and SP
+ .save {r9, r12}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r9, 0
+ bl artSet64InstanceFromCode @ (field_idx, Object*, new_val, Thread*, SP)
+ add sp, #16 @ release out args
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set64_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store an object reference.
+ */
+ .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ ldr r3, [sp, #32] @ pass referrer
+ mov r12, sp @ save SP
+ sub sp, #8 @ grow frame for alignment with stack args
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ push {r9, r12} @ pass Thread::Current and SP
+ .save {r9, r12}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r9, 0
+ bl artSetObjInstanceFromCode @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+ add sp, #16 @ release out args
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here
+ cmp r0, #0 @ success if result is 0
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_set_obj_instance_from_code
+
+ /*
+     * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
+     * exception on error. On success the String is returned. R0 holds the referring method,
+     * R1 holds the string index. The fast-path check for a hit in the strings cache has already
+     * been performed.
+ */
+ .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP)
+ bl artResolveStringFromCode
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_resolve_string_from_code
+
+ /*
+     * Called by managed code to allocate an object.
+ */
+ .extern artAllocObjectFromCode
+ENTRY art_quick_alloc_object_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artAllocObjectFromCode @ (uint32_t type_idx, Method* method, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_object_from_code
+
+ /*
+ * Called by managed code to allocate an object when the caller doesn't know whether it has
+ * access to the created type.
+ */
+ .extern artAllocObjectFromCodeWithAccessCheck
+ENTRY art_quick_alloc_object_from_code_with_access_check
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ bl artAllocObjectFromCodeWithAccessCheck @ (uint32_t type_idx, Method* method, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_object_from_code_with_access_check
+
+ /*
+ * Called by managed code to allocate an array.
+ */
+ .extern artAllocArrayFromCode
+ENTRY art_quick_alloc_array_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+ bl artAllocArrayFromCode
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_array_from_code
+
+ /*
+ * Called by managed code to allocate an array when the caller doesn't know whether it has
+ * access to the created type.
+ */
+ .extern artAllocArrayFromCodeWithAccessCheck
+ENTRY art_quick_alloc_array_from_code_with_access_check
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP)
+ bl artAllocArrayFromCodeWithAccessCheck
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_alloc_array_from_code_with_access_check
+
+ /*
+ * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+ */
+ .extern artCheckAndAllocArrayFromCode
+ENTRY art_quick_check_and_alloc_array_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP)
+ bl artCheckAndAllocArrayFromCode
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_check_and_alloc_array_from_code
+
+ /*
+ * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+ */
+ .extern artCheckAndAllocArrayFromCodeWithAccessCheck
+ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC
+ mov r3, r9 @ pass Thread::Current
+ mov r12, sp
+ str r12, [sp, #-16]! @ expand the frame and pass SP
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP)
+ bl artCheckAndAllocArrayFromCodeWithAccessCheck
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ cmp r0, #0 @ success if result is non-null
+ bxne lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_check_and_alloc_array_from_code_with_access_check
+
+ /*
+ * Called by managed code when the value in rSUSPEND has been decremented to 0.
+ */
+ .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+ ldrh r0, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov rSUSPEND, #SUSPEND_CHECK_INTERVAL @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ cmp r0, #0 @ check Thread::Current()->suspend_count_ == 0
+ bxeq lr @ return if suspend_count_ == 0
+ mov r0, rSELF
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl
+ mov r1, sp
+ bl artTestSuspendFromCode @ (Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str r0, [sp, #0] @ place proxy method at bottom of frame
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ pop frame
+ .cfi_adjust_cfa_offset -48
+ bx lr @ return
+END art_portable_proxy_invoke_handler
+
+ /*
+ * Called by managed code that is attempting to call a method on a proxy class. On entry
+ * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The
+ * frame size of the invoked proxy method agrees with a ref and args callee save frame.
+ */
+ .extern artQuickProxyInvokeHandler
+ENTRY art_quick_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str r0, [sp, #0] @ place proxy method at bottom of frame
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ pop frame
+ .cfi_adjust_cfa_offset -48
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_proxy_invoke_handler
+
+ .extern artInterpreterEntry
+ENTRY art_quick_interpreter_entry
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str r0, [sp, #0] @ place method at bottom of frame
+ mov r1, r9 @ pass Thread::Current
+ mov r2, sp @ pass SP
+ blx artInterpreterEntry @ (Method* method, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ pop frame
+ .cfi_adjust_cfa_offset -48
+ cmp r12, #0 @ success if no exception is pending
+ bxeq lr @ return on success
+ DELIVER_PENDING_EXCEPTION
+END art_quick_interpreter_entry
+
+ /*
+ * Routines that intercept method calls and method returns.
+ */
+ .extern artInstrumentationMethodEntryFromCode
+ .extern artInstrumentationMethodExitFromCode
+ENTRY art_quick_instrumentation_entry_from_code
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str r0, [sp, #4] @ preserve r0
+ mov r12, sp @ remember sp
+ str lr, [sp, #-16]! @ expand the frame and pass LR
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset lr, 0
+ mov r2, r9 @ pass Thread::Current
+ mov r3, r12 @ pass SP
+ blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP, LR)
+ add sp, #16 @ remove out argument and padding from stack
+ .cfi_adjust_cfa_offset -16
+ mov r12, r0 @ r12 holds reference to code
+ ldr r0, [sp, #4] @ restore r0
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ blx r12 @ call method with lr set to art_quick_instrumentation_exit_from_code
+END art_quick_instrumentation_entry_from_code
+ .type art_quick_instrumentation_exit_from_code, #function
+ .global art_quick_instrumentation_exit_from_code
+art_quick_instrumentation_exit_from_code:
+ .cfi_startproc
+ .fnstart
+ mov lr, #0 @ the link register points here, so clobber it with 0 for later checks
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ mov r12, sp @ remember bottom of caller's frame
+ push {r0-r1} @ save return value
+ .save {r0-r1}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ sub sp, #8 @ space for return value argument
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ strd r0, [sp] @ r0/r1 -> [sp] for fpr_res
+ mov r2, r0 @ pass return value as gpr_res
+ mov r3, r1
+ mov r0, r9 @ pass Thread::Current
+ mov r1, r12 @ pass SP
+ blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res, fpr_res)
+ add sp, #8
+ .cfi_adjust_cfa_offset -8
+
+ mov r2, r0 @ link register saved by instrumentation
+ mov lr, r1 @ r1 is holding link register if we're to bounce to deoptimize
+ pop {r0, r1} @ restore return value
+ add sp, #32 @ remove callee save frame
+ .cfi_adjust_cfa_offset -32
+ bx r2 @ return
+END art_quick_instrumentation_exit_from_code
+
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimize
+ENTRY art_quick_deoptimize
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ mov r0, r9 @ Set up args.
+ mov r1, sp
+ blx artDeoptimize @ artDeoptimize(Thread*, SP)
+END art_quick_deoptimize
+
+ /*
+ * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable.
+ */
+ .extern artThrowAbstractMethodErrorFromCode
+ENTRY art_portable_abstract_method_error_stub
+ mov r1, r9 @ pass Thread::Current
+ b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP)
+END art_portable_abstract_method_error_stub
+
+ /*
+ * Quick abstract method error stub. r0 contains method* on entry.
+ */
+ENTRY art_quick_abstract_method_error_stub
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ mov r1, r9 @ pass Thread::Current
+ mov r2, sp @ pass SP
+ b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP)
+END art_quick_abstract_method_error_stub
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ push {r0, r1, r2, r3, lr} @ spill regs
+ .save {r0, r1, r2, r3, lr}
+ .pad #20
+ .cfi_adjust_cfa_offset 20
+ sub sp, #12 @ pad stack pointer to align frame
+ .pad #12
+ .cfi_adjust_cfa_offset 12
+ mov r0, r9 @ pass Thread::Current
+ blx artFindNativeMethod @ (Thread*)
+ mov r12, r0 @ save result in r12
+ add sp, #12 @ restore stack pointer
+ .cfi_adjust_cfa_offset -12
+ pop {r0, r1, r2, r3, lr} @ restore regs
+ .cfi_adjust_cfa_offset -20
+ cmp r12, #0 @ is method code null?
+ bxne r12 @ if non-null, tail call to method's code
+ bx lr @ otherwise, return to caller to handle exception
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ENTRY art_quick_mul_long
+ push {r9 - r10}
+ .save {r9 - r10}
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset r9, 0
+ .cfi_rel_offset r10, 4
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ mov r0,r9
+ mov r1,r10
+ pop {r9 - r10}
+ .cfi_adjust_cfa_offset -8
+ bx lr
+END art_quick_mul_long
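
For reference, a minimal C++ sketch (illustrative only, not part of this patch; MulLong is a hypothetical name) of the WXxYZ decomposition described above:

    #include <cstdint>

    // Mirrors the umull/mul/mla sequence: the low 64 bits of the product are
    // Z*X plus the low words of Z*W and Y*X folded into the high half.
    uint64_t MulLong(uint64_t wx, uint64_t yz) {
      uint32_t x = static_cast<uint32_t>(wx);        // low word (r0)
      uint32_t w = static_cast<uint32_t>(wx >> 32);  // high word (r1)
      uint32_t z = static_cast<uint32_t>(yz);        // low word (r2)
      uint32_t y = static_cast<uint32_t>(yz >> 32);  // high word (r3)
      uint64_t zx = static_cast<uint64_t>(z) * x;    // umull: full 64-bit Z*X
      uint32_t hi = static_cast<uint32_t>(zx >> 32)  // high overflow from Z*X
                  + z * w + y * x;                   // mul + mla: low words only
      return (static_cast<uint64_t>(hi) << 32) | static_cast<uint32_t>(zx);
    }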
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * r0: low word
+ * r1: high word
+ * r2: shift count
+ */
+ /* shl-long vAA, vBB, vCC */
+ENTRY art_quick_shl_long
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ bx lr
+END art_quick_shl_long
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * r0: low word
+ * r1: high word
+ * r2: shift count
+ */
+ /* shr-long vAA, vBB, vCC */
+ENTRY art_quick_shr_long
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ bx lr
+END art_quick_shr_long
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * r0: low word
+ * r1: high word
+ * r2: shift count
+ */
+ /* ushr-long vAA, vBB, vCC */
+ENTRY art_quick_ushr_long
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ bx lr
+END art_quick_ushr_long
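
The same mask-to-6-bits contract applies to all three shift stubs above. A minimal C++ sketch of the semantics (illustrative names, assuming arithmetic right shift for signed types, as on ART's targets):

    #include <cstdint>

    uint64_t ShlLong(uint64_t val, uint32_t shift) {
      return val << (shift & 63);  // ignore all but the low 6 bits
    }

    int64_t ShrLong(int64_t val, uint32_t shift) {
      return val >> (shift & 63);  // arithmetic: sign-extends from the left
    }

    uint64_t UshrLong(uint64_t val, uint32_t shift) {
      return val >> (shift & 63);  // logical: zero-fills from the left
    }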
+
+ /*
+ * String's indexOf.
+ *
+ * On entry:
+ * r0: string object (known non-null)
+ * r1: char to match (known <= 0xFFFF)
+ * r2: Starting offset in string data
+ */
+ENTRY art_quick_indexof
+ push {r4, r10-r11, lr} @ 4 words of callee saves
+ .save {r4, r10-r11, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r4, 0
+ .cfi_rel_offset r10, 4
+ .cfi_rel_offset r11, 8
+ .cfi_rel_offset lr, 12
+ ldr r3, [r0, #STRING_COUNT_OFFSET]
+ ldr r12, [r0, #STRING_OFFSET_OFFSET]
+ ldr r0, [r0, #STRING_VALUE_OFFSET]
+
+ /* Clamp start to [0..count] */
+ cmp r2, #0
+ movlt r2, #0
+ cmp r2, r3
+ movgt r2, r3
+
+ /* Build a pointer to the start of string data */
+ add r0, #STRING_DATA_OFFSET
+ add r0, r0, r12, lsl #1
+
+ /* Save a copy in r12 to later compute result */
+ mov r12, r0
+
+ /* Build pointer to start of data to compare and pre-bias */
+ add r0, r0, r2, lsl #1
+ sub r0, #2
+
+ /* Compute iteration count */
+ sub r2, r3, r2
+
+ /*
+ * At this point we have:
+ * r0: start of data to test
+ * r1: char to compare
+ * r2: iteration count
+ * r12: original start of string data
+ * r3, r4, r10, r11 available for loading string data
+ */
+
+ subs r2, #4
+ blt indexof_remainder
+
+indexof_loop4:
+ ldrh r3, [r0, #2]!
+ ldrh r4, [r0, #2]!
+ ldrh r10, [r0, #2]!
+ ldrh r11, [r0, #2]!
+ cmp r3, r1
+ beq match_0
+ cmp r4, r1
+ beq match_1
+ cmp r10, r1
+ beq match_2
+ cmp r11, r1
+ beq match_3
+ subs r2, #4
+ bge indexof_loop4
+
+indexof_remainder:
+ adds r2, #4
+ beq indexof_nomatch
+
+indexof_loop1:
+ ldrh r3, [r0, #2]!
+ cmp r3, r1
+ beq match_3
+ subs r2, #1
+ bne indexof_loop1
+
+indexof_nomatch:
+ mov r0, #-1
+ pop {r4, r10-r11, pc}
+
+match_0:
+ sub r0, #6
+ sub r0, r12
+ asr r0, r0, #1
+ pop {r4, r10-r11, pc}
+match_1:
+ sub r0, #4
+ sub r0, r12
+ asr r0, r0, #1
+ pop {r4, r10-r11, pc}
+match_2:
+ sub r0, #2
+ sub r0, r12
+ asr r0, r0, #1
+ pop {r4, r10-r11, pc}
+match_3:
+ sub r0, r12
+ asr r0, r0, #1
+ pop {r4, r10-r11, pc}
+END art_quick_indexof
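
For reference, a C++ sketch of the contract the unrolled loops above implement (illustrative only; the value/offset/count field loads are elided):

    #include <cstdint>

    // Returns the index of the first occurrence of ch at or after start, or -1.
    int32_t IndexOf(const uint16_t* chars, int32_t count, uint16_t ch,
                    int32_t start) {
      if (start < 0) start = 0;          // clamp start to [0..count]
      if (start > count) start = count;
      for (int32_t i = start; i < count; ++i) {
        if (chars[i] == ch) {
          return i;
        }
      }
      return -1;                         // no match
    }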
+
+ /*
+ * String's compareTo.
+ *
+ * Requires rARG0/rARG1 to have been previously checked for null. Returns
+ * negative if this string is less than comp, 0 if they are equal, and
+ * positive if this string is greater.
+ *
+ * On entry:
+ * r0: this object pointer
+ * r1: comp object pointer
+ *
+ */
+ .extern __memcmp16
+ENTRY art_quick_string_compareto
+ mov r2, r0 @ this to r2, opening up r0 for return value
+ subs r0, r2, r1 @ Same?
+ bxeq lr
+
+ push {r4, r7-r12, lr} @ 8 words - keep alignment
+ .save {r4, r7-r12, lr}
+ .cfi_adjust_cfa_offset 32
+ .cfi_rel_offset r4, 0
+ .cfi_rel_offset r7, 4
+ .cfi_rel_offset r8, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset r10, 16
+ .cfi_rel_offset r11, 20
+ .cfi_rel_offset r12, 24
+ .cfi_rel_offset lr, 28
+
+ ldr r4, [r2, #STRING_OFFSET_OFFSET]
+ ldr r9, [r1, #STRING_OFFSET_OFFSET]
+ ldr r7, [r2, #STRING_COUNT_OFFSET]
+ ldr r10, [r1, #STRING_COUNT_OFFSET]
+ ldr r2, [r2, #STRING_VALUE_OFFSET]
+ ldr r1, [r1, #STRING_VALUE_OFFSET]
+
+ /*
+ * At this point, we have:
+ * value: r2/r1
+ * offset: r4/r9
+ * count: r7/r10
+ * We're going to compute
+ * r11 <- countDiff
+ * r10 <- minCount
+ */
+ subs r11, r7, r10
+ movls r10, r7
+
+ /* Now, build pointers to the string data */
+ add r2, r2, r4, lsl #1
+ add r1, r1, r9, lsl #1
+ /*
+ * Note: data pointers point to previous element so we can use pre-index
+ * mode with base writeback.
+ */
+ add r2, #STRING_DATA_OFFSET-2 @ offset to contents[-1]
+ add r1, #STRING_DATA_OFFSET-2 @ offset to contents[-1]
+
+ /*
+ * At this point we have:
+ * r2: *this string data
+ * r1: *comp string data
+ * r10: iteration count for comparison
+ * r11: value to return if the first part of the string is equal
+ * r0: reserved for result
+ * r3, r4, r7, r8, r9, r12 available for loading string data
+ */
+
+ subs r10, #2
+ blt do_remainder2
+
+ /*
+ * Unroll the first two checks so we can quickly catch early mismatch
+ * on long strings (but preserve incoming alignment)
+ */
+
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ bne done
+ cmp r10, #28
+ bgt do_memcmp16
+ subs r10, #3
+ blt do_remainder
+
+loopback_triple:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ ldrh r9, [r2, #2]!
+ ldrh r12,[r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ subeqs r0, r9, r12
+ bne done
+ subs r10, #3
+ bge loopback_triple
+
+do_remainder:
+ adds r10, #3
+ beq returnDiff
+
+loopback_single:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ subs r0, r3, r4
+ bne done
+ subs r10, #1
+ bne loopback_single
+
+returnDiff:
+ mov r0, r11
+ pop {r4, r7-r12, pc}
+
+do_remainder2:
+ adds r10, #2
+ bne loopback_single
+ mov r0, r11
+ pop {r4, r7-r12, pc}
+
+ /* Long string case */
+do_memcmp16:
+ mov r7, r11
+ add r0, r2, #2
+ add r1, r1, #2
+ mov r2, r10
+ bl __memcmp16
+ cmp r0, #0
+ moveq r0, r7
+done:
+ pop {r4, r7-r12, pc}
+END art_quick_string_compareto
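
A C++ sketch of the countDiff/minCount scheme above (illustrative only, not part of this patch):

    #include <cstdint>

    int32_t CompareTo(const uint16_t* a, int32_t a_count,
                      const uint16_t* b, int32_t b_count) {
      int32_t count_diff = a_count - b_count;                       // r11
      int32_t min_count = (a_count < b_count) ? a_count : b_count;  // r10
      for (int32_t i = 0; i < min_count; ++i) {
        int32_t diff = a[i] - b[i];
        if (diff != 0) {
          return diff;        // first differing code unit decides
        }
      }
      return count_diff;      // equal prefix: the longer string is greater
    }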
diff --git a/runtime/oat/runtime/callee_save_frame.h b/runtime/oat/runtime/callee_save_frame.h
new file mode 100644
index 0000000..dd2f3fa
--- /dev/null
+++ b/runtime/oat/runtime/callee_save_frame.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+#define ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+
+#include "base/mutex.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace mirror {
+class AbstractMethod;
+} // namespace mirror
+
+// Place a special frame at the TOS that will save the callee saves for the given type.
+static void FinishCalleeSaveFrameSetup(Thread* self, mirror::AbstractMethod** sp,
+ Runtime::CalleeSaveType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Be aware the store below may well stomp on an incoming argument.
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ *sp = Runtime::Current()->GetCalleeSaveMethod(type);
+ self->SetTopOfStack(sp, 0);
+ self->VerifyStack();
+}
+
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
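
A hypothetical usage sketch (not part of this patch; artExampleFromCode, its signature, and the choice of Runtime::kRefsOnly are illustrative): entrypoints finish the callee-save frame before any work that can allocate, suspend, or throw.

    extern "C" int artExampleFromCode(uint32_t arg, Thread* self,
                                      mirror::AbstractMethod** sp)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
      // ... work that may trigger GC or raise an exception ...
      return 0;
    }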
diff --git a/runtime/oat/runtime/context.cc b/runtime/oat/runtime/context.cc
new file mode 100644
index 0000000..7075e42
--- /dev/null
+++ b/runtime/oat/runtime/context.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context.h"
+
+#if defined(__arm__)
+#include "arm/context_arm.h"
+#elif defined(__mips__)
+#include "mips/context_mips.h"
+#elif defined(__i386__)
+#include "x86/context_x86.h"
+#endif
+
+namespace art {
+
+Context* Context::Create() {
+#if defined(__arm__)
+ return new arm::ArmContext();
+#elif defined(__mips__)
+ return new mips::MipsContext();
+#elif defined(__i386__)
+ return new x86::X86Context();
+#else
+ UNIMPLEMENTED(FATAL);
+#endif
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/context.h b/runtime/oat/runtime/context.h
new file mode 100644
index 0000000..895abf9
--- /dev/null
+++ b/runtime/oat/runtime/context.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_CONTEXT_H_
+#define ART_SRC_OAT_RUNTIME_CONTEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace art {
+
+class StackVisitor;
+
+// Representation of a thread's context on the executing machine, used to implement long jumps in
+// the quick stack frame layout.
+class Context {
+ public:
+ // Creates a context for the running architecture
+ static Context* Create();
+
+ virtual ~Context() {}
+
+ // Re-initializes the registers for context re-use.
+ virtual void Reset() = 0;
+
+ // Read values from callee saves in the given frame. The frame also holds
+ // the method that holds the layout.
+ virtual void FillCalleeSaves(const StackVisitor& fr) = 0;
+
+ // Set the stack pointer value
+ virtual void SetSP(uintptr_t new_sp) = 0;
+
+ // Set the program counter value
+ virtual void SetPC(uintptr_t new_pc) = 0;
+
+ // Read the given GPR
+ virtual uintptr_t GetGPR(uint32_t reg) = 0;
+
+ // Set the given GPR.
+ virtual void SetGPR(uint32_t reg, uintptr_t value) = 0;
+
+ // Smash the caller save registers. If we're throwing, we don't want to return bogus values.
+ virtual void SmashCallerSaves() = 0;
+
+ // Switch execution of the executing context to this context
+ virtual void DoLongJump() = 0;
+
+ protected:
+ enum {
+ kBadGprBase = 0xebad6070,
+ kBadFprBase = 0xebad8070,
+ };
+};
+
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_CONTEXT_H_
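
A sketch of how exception delivery might drive this interface (illustrative; handler_sp and handler_pc are assumed to have been computed by stack walking):

    void LongJumpToHandler(Context* context, const StackVisitor& visitor,
                           uintptr_t handler_sp, uintptr_t handler_pc) {
      context->FillCalleeSaves(visitor);  // recover callee saves from the frame
      context->SmashCallerSaves();        // avoid returning bogus caller-save values
      context->SetSP(handler_sp);         // stack pointer of the catch frame
      context->SetPC(handler_pc);         // address at which to resume
      context->DoLongJump();              // transfers control; does not return
    }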
diff --git a/runtime/oat/runtime/mips/context_mips.cc b/runtime/oat/runtime/mips/context_mips.cc
new file mode 100644
index 0000000..cbd63d8
--- /dev/null
+++ b/runtime/oat/runtime/mips/context_mips.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_mips.h"
+
+#include "mirror/abstract_method.h"
+#include "mirror/object-inl.h"
+#include "stack.h"
+
+namespace art {
+namespace mips {
+
+static const uint32_t gZero = 0;
+
+void MipsContext::Reset() {
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ gprs_[i] = NULL;
+ }
+ for (size_t i = 0; i < kNumberOfFRegisters; i++) {
+ fprs_[i] = NULL;
+ }
+ gprs_[SP] = &sp_;
+ gprs_[RA] = &ra_;
+ // Initialize registers with easy to spot debug values.
+ sp_ = MipsContext::kBadGprBase + SP;
+ ra_ = MipsContext::kBadGprBase + RA;
+}
+
+void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
+ mirror::AbstractMethod* method = fr.GetMethod();
+ uint32_t core_spills = method->GetCoreSpillMask();
+ uint32_t fp_core_spills = method->GetFpSpillMask();
+ size_t spill_count = __builtin_popcount(core_spills);
+ size_t fp_spill_count = __builtin_popcount(fp_core_spills);
+ size_t frame_size = method->GetFrameSizeInBytes();
+ if (spill_count > 0) {
+ // Lowest number spill is farthest away, walk registers and fill into context.
+ int j = 1;
+ for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
+ if (((core_spills >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+ if (fp_spill_count > 0) {
+ // Lowest number spill is farthest away, walk registers and fill into context.
+ int j = 1;
+ for (size_t i = 0; i < kNumberOfFRegisters; i++) {
+ if (((fp_core_spills >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+}
+
+void MipsContext::SetGPR(uint32_t reg, uintptr_t value) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
+ CHECK_NE(gprs_[reg], &gZero); // Can't overwrite the static gZero value since it is never reset.
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
+void MipsContext::SmashCallerSaves() {
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[V0] = const_cast<uint32_t*>(&gZero);
+ gprs_[V1] = const_cast<uint32_t*>(&gZero);
+ gprs_[A1] = NULL;
+ gprs_[A2] = NULL;
+ gprs_[A3] = NULL;
+}
+
+extern "C" void art_quick_do_long_jump(uint32_t*, uint32_t*);
+
+void MipsContext::DoLongJump() {
+ uintptr_t gprs[kNumberOfCoreRegisters];
+ uint32_t fprs[kNumberOfFRegisters];
+ for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
+ gprs[i] = gprs_[i] != NULL ? *gprs_[i] : MipsContext::kBadGprBase + i;
+ }
+ for (size_t i = 0; i < kNumberOfFRegisters; ++i) {
+ fprs[i] = fprs_[i] != NULL ? *fprs_[i] : MipsContext::kBadFprBase + i;
+ }
+ art_quick_do_long_jump(gprs, fprs);
+}
+
+} // namespace mips
+} // namespace art
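
A small self-contained illustration (not part of this patch) of the spill-mask walk in FillCalleeSaves above: the j-th set bit, counting from the lowest, sits spill_count - j slots from the top of the spill area, so the lowest-numbered register is farthest away.

    #include <cstdint>
    #include <cstdio>

    void PrintSpillSlots(uint32_t core_spills) {
      int spill_count = __builtin_popcount(core_spills);
      int j = 1;
      for (int i = 0; i < 32; ++i) {
        if (((core_spills >> i) & 1) != 0) {
          printf("reg %d -> slot %d from the top of the spill area\n",
                 i, spill_count - j);
          j++;
        }
      }
    }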
diff --git a/runtime/oat/runtime/mips/context_mips.h b/runtime/oat/runtime/mips/context_mips.h
new file mode 100644
index 0000000..fc8ef96
--- /dev/null
+++ b/runtime/oat/runtime/mips/context_mips.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
+#define ART_SRC_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
+
+#include "constants_mips.h"
+#include "oat/runtime/context.h"
+
+namespace art {
+namespace mips {
+
+class MipsContext : public Context {
+ public:
+ MipsContext() {
+ Reset();
+ }
+ virtual ~MipsContext() {}
+
+ virtual void Reset();
+
+ virtual void FillCalleeSaves(const StackVisitor& fr);
+
+ virtual void SetSP(uintptr_t new_sp) {
+ SetGPR(SP, new_sp);
+ }
+
+ virtual void SetPC(uintptr_t new_pc) {
+ SetGPR(RA, new_pc);
+ }
+
+ virtual uintptr_t GetGPR(uint32_t reg) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
+ return *gprs_[reg];
+ }
+
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
+ virtual void SmashCallerSaves();
+ virtual void DoLongJump();
+
+ private:
+ // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ uintptr_t* gprs_[kNumberOfCoreRegisters];
+ uint32_t* fprs_[kNumberOfFRegisters];
+ // Hold values for sp and ra (return address) if they are not located within a stack frame.
+ uintptr_t sp_, ra_;
+};
+} // namespace mips
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
diff --git a/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc b/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc
new file mode 100644
index 0000000..8e06611
--- /dev/null
+++ b/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_support.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Exception entrypoints.
+extern "C" void* GetAndClearException(Thread*);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+extern "C" int64_t artLmulFromCode(int64_t a, int64_t b);
+extern "C" int64_t artLdivFromCode(int64_t a, int64_t b);
+extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b);
+
+// Math conversions.
+extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT
+extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT
+extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT
+extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE
+extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG
+extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG
+
+// Single-precision FP arithmetic.
+extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetics.
+extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+
+// Long long arithmetic - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" int64_t __divdi3(int64_t, int64_t);
+extern "C" int64_t __moddi3(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(EntryPoints* points) {
+ // Alloc
+ points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ points->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ points->pSet32Instance = art_quick_set32_instance_from_code;
+ points->pSet32Static = art_quick_set32_static_from_code;
+ points->pSet64Instance = art_quick_set64_instance_from_code;
+ points->pSet64Static = art_quick_set64_static_from_code;
+ points->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ points->pSetObjStatic = art_quick_set_obj_static_from_code;
+ points->pGet32Instance = art_quick_get32_instance_from_code;
+ points->pGet64Instance = art_quick_get64_instance_from_code;
+ points->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ points->pGet32Static = art_quick_get32_static_from_code;
+ points->pGet64Static = art_quick_get64_static_from_code;
+ points->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ points->pJniMethodStart = JniMethodStart;
+ points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ points->pJniMethodEnd = JniMethodEnd;
+ points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ points->pJniMethodEndWithReference = JniMethodEndWithReference;
+ points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ points->pLockObjectFromCode = art_quick_lock_object_from_code;
+ points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ points->pCmpgDouble = CmpgDouble;
+ points->pCmpgFloat = CmpgFloat;
+ points->pCmplDouble = CmplDouble;
+ points->pCmplFloat = CmplFloat;
+ points->pFmod = fmod;
+ points->pL2d = __floatdidf;
+ points->pFmodf = fmodf;
+ points->pL2f = __floatdisf;
+ points->pD2iz = __fixdfsi;
+ points->pF2iz = __fixsfsi;
+ points->pIdivmod = NULL;
+ points->pD2l = art_d2l;
+ points->pF2l = art_f2l;
+ points->pLdiv = artLdivFromCode;
+ points->pLdivmod = artLdivmodFromCode;
+ points->pLmul = artLmulFromCode;
+ points->pShlLong = art_quick_shl_long;
+ points->pShrLong = art_quick_shr_long;
+ points->pUshrLong = art_quick_ushr_long;
+
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ points->pIndexOf = art_quick_indexof;
+ points->pMemcmp16 = __memcmp16;
+ points->pStringCompareTo = art_quick_string_compareto;
+ points->pMemcpy = memcpy;
+
+ // Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ points->pCheckSuspendFromCode = CheckSuspendFromCode;
+ points->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ points->pDeliverException = art_quick_deliver_exception_from_code;
+ points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/mips/runtime_support_mips.S b/runtime/oat/runtime/mips/runtime_support_mips.S
new file mode 100644
index 0000000..45d583e
--- /dev/null
+++ b/runtime/oat/runtime/mips/runtime_support_mips.S
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support.h"
+
+ .set noreorder
+ .balign 4
+
+ /* Deliver the given exception */
+ .extern artDeliverExceptionFromCode
+ /* Deliver an exception pending on a thread */
+ .extern artDeliverPendingExceptionFromCode
+
+ /* Cache alignment for function entry */
+.macro ENTRY name
+ .type \name, %function
+ .global \name
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+ /* Generates $gp for function calls */
+.macro GENERATE_GLOBAL_POINTER
+ .cpload $t9
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word padding + 4 open words for args
+ */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sw $ra, 60($sp)
+ .cfi_rel_offset 31, 60
+ sw $s8, 56($sp)
+ .cfi_rel_offset 30, 56
+ sw $gp, 52($sp)
+ .cfi_rel_offset 28, 52
+ sw $s7, 48($sp)
+ .cfi_rel_offset 23, 48
+ sw $s6, 44($sp)
+ .cfi_rel_offset 22, 44
+ sw $s5, 40($sp)
+ .cfi_rel_offset 21, 40
+ sw $s4, 36($sp)
+ .cfi_rel_offset 20, 36
+ sw $s3, 32($sp)
+ .cfi_rel_offset 19, 32
+ sw $s2, 28($sp)
+ .cfi_rel_offset 18, 28
+ sw $s1, 24($sp)
+ .cfi_rel_offset 17, 24
+ sw $s0, 20($sp)
+ .cfi_rel_offset 16, 20
+ # 1 word for alignment, 4 open words for args $a0-$a3, bottom will hold Method*
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+ * Does not include rSUSPEND or rSELF
+ * callee-save: $s2-$s8 + $gp + $ra, 9 total + 3 words padding + 4 open words for args
+ */
+.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sw $ra, 60($sp)
+ .cfi_rel_offset 31, 60
+ sw $s8, 56($sp)
+ .cfi_rel_offset 30, 56
+ sw $gp, 52($sp)
+ .cfi_rel_offset 28, 52
+ sw $s7, 48($sp)
+ .cfi_rel_offset 23, 48
+ sw $s6, 44($sp)
+ .cfi_rel_offset 22, 44
+ sw $s5, 40($sp)
+ .cfi_rel_offset 21, 40
+ sw $s4, 36($sp)
+ .cfi_rel_offset 20, 36
+ sw $s3, 32($sp)
+ .cfi_rel_offset 19, 32
+ sw $s2, 28($sp)
+ .cfi_rel_offset 18, 28
+ # 3 words for alignment and extra args, 4 open words for args $a0-$a3, bottom will hold Method*
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ lw $gp, 52($sp)
+ lw $ra, 60($sp)
+ addiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+ lw $gp, 52($sp)
+ lw $ra, 60($sp)
+ jr $ra
+ addiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * callee-save: $a1-$a3, $s2-$s8 + $gp + $ra, 12 total + 3 words padding + method*
+ */
+.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sw $ra, 60($sp)
+ .cfi_rel_offset 31, 60
+ sw $s8, 56($sp)
+ .cfi_rel_offset 30, 56
+ sw $gp, 52($sp)
+ .cfi_rel_offset 28, 52
+ sw $s7, 48($sp)
+ .cfi_rel_offset 23, 48
+ sw $s6, 44($sp)
+ .cfi_rel_offset 22, 44
+ sw $s5, 40($sp)
+ .cfi_rel_offset 21, 40
+ sw $s4, 36($sp)
+ .cfi_rel_offset 20, 36
+ sw $s3, 32($sp)
+ .cfi_rel_offset 19, 32
+ sw $s2, 28($sp)
+ .cfi_rel_offset 18, 28
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ # bottom will hold Method*
+.endm
+
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ lw $ra, 60($sp) # restore $ra
+ lw $gp, 52($sp) # restore $gp
+ lw $a1, 4($sp) # restore non-callee save $a1
+ lw $a2, 8($sp) # restore non-callee save $a2
+ lw $a3, 12($sp) # restore non-callee save $a3
+ addiu $sp, $sp, 64 # strip frame
+ .cfi_adjust_cfa_offset -64
+.endm
+
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+.macro DELIVER_PENDING_EXCEPTION
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
+ move $a0, rSELF # pass Thread::Current
+ la $t9, artDeliverPendingExceptionFromCode
+ jr $t9 # artDeliverPendingExceptionFromCode(Thread*, $sp)
+ move $a1, $sp # pass $sp
+.endm
+
+.macro RETURN_IF_NO_EXCEPTION
+ lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bnez $t0, 1f # if an exception is pending, branch to deliver it
+ nop
+ jr $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro RETURN_IF_ZERO
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ bnez $v0, 1f # success?
+ nop
+ jr $ra # return on success
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro RETURN_IF_NONZERO
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ beqz $v0, 1f # success?
+ nop
+ jr $ra # return on success
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+.endm
+
+ /*
+ * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
+ * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
+ */
+ENTRY art_quick_do_long_jump
+ l.s $f0, 0($a1)
+ l.s $f1, 4($a1)
+ l.s $f2, 8($a1)
+ l.s $f3, 12($a1)
+ l.s $f4, 16($a1)
+ l.s $f5, 20($a1)
+ l.s $f6, 24($a1)
+ l.s $f7, 28($a1)
+ l.s $f8, 32($a1)
+ l.s $f9, 36($a1)
+ l.s $f10, 40($a1)
+ l.s $f11, 44($a1)
+ l.s $f12, 48($a1)
+ l.s $f13, 52($a1)
+ l.s $f14, 56($a1)
+ l.s $f15, 60($a1)
+ l.s $f16, 64($a1)
+ l.s $f17, 68($a1)
+ l.s $f18, 72($a1)
+ l.s $f19, 76($a1)
+ l.s $f20, 80($a1)
+ l.s $f21, 84($a1)
+ l.s $f22, 88($a1)
+ l.s $f23, 92($a1)
+ l.s $f24, 96($a1)
+ l.s $f25, 100($a1)
+ l.s $f26, 104($a1)
+ l.s $f27, 108($a1)
+ l.s $f28, 112($a1)
+ l.s $f29, 116($a1)
+ l.s $f30, 120($a1)
+ l.s $f31, 124($a1)
+ lw $at, 4($a0)
+ lw $v0, 8($a0)
+ lw $v1, 12($a0)
+ lw $a1, 20($a0)
+ lw $a2, 24($a0)
+ lw $a3, 28($a0)
+ lw $t0, 32($a0)
+ lw $t1, 36($a0)
+ lw $t2, 40($a0)
+ lw $t3, 44($a0)
+ lw $t4, 48($a0)
+ lw $t5, 52($a0)
+ lw $t6, 56($a0)
+ lw $t7, 60($a0)
+ lw $s0, 64($a0)
+ lw $s1, 68($a0)
+ lw $s2, 72($a0)
+ lw $s3, 76($a0)
+ lw $s4, 80($a0)
+ lw $s5, 84($a0)
+ lw $s6, 88($a0)
+ lw $s7, 92($a0)
+ lw $t8, 96($a0)
+ lw $t9, 100($a0)
+ lw $k0, 104($a0)
+ lw $k1, 108($a0)
+ lw $gp, 112($a0)
+ lw $sp, 116($a0)
+ lw $fp, 120($a0)
+ lw $ra, 124($a0)
+ lw $a0, 16($a0)
+ move $v0, $zero # clear result registers $v0 and $v1
+ jr $ra # do long jump
+ move $v1, $zero
+END art_quick_do_long_jump
+
+ /*
+ * Called by managed code, saves most registers (forms basis of long jump context) and passes
+ * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
+ * the bottom of the thread's stack. On entry $a0 holds Throwable*.
+ */
+ENTRY art_quick_deliver_exception_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, rSELF # pass Thread::Current
+ la $t9, artDeliverExceptionFromCode
+ jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*, $sp)
+ move $a2, $sp # pass $sp
+END art_quick_deliver_exception_from_code
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException
+ */
+ .extern artThrowNullPointerExceptionFromCode
+ENTRY art_quick_throw_null_pointer_exception_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a0, rSELF # pass Thread::Current
+ la $t9, artThrowNullPointerExceptionFromCode
+ jr $t9 # artThrowNullPointerExceptionFromCode(Thread*, $sp)
+ move $a1, $sp # pass $sp
+END art_quick_throw_null_pointer_exception_from_code
+
+ /*
+ * Called by managed code to create and deliver an ArithmeticException
+ */
+ .extern artThrowDivZeroFromCode
+ENTRY art_quick_throw_div_zero_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a0, rSELF # pass Thread::Current
+ la $t9, artThrowDivZeroFromCode
+ jr $t9 # artThrowDivZeroFromCode(Thread*, $sp)
+ move $a1, $sp # pass $sp
+END art_quick_throw_div_zero_from_code
+
+ /*
+ * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
+ */
+ .extern artThrowArrayBoundsFromCode
+ENTRY art_quick_throw_array_bounds_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a2, rSELF # pass Thread::Current
+ la $t9, artThrowArrayBoundsFromCode
+ jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp)
+ move $a3, $sp # pass $sp
+END art_quick_throw_array_bounds_from_code
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+ .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a0, rSELF # pass Thread::Current
+ la $t9, artThrowStackOverflowFromCode
+ jr $t9 # artThrowStackOverflowFromCode(Thread*, $sp)
+ move $a1, $sp # pass $sp
+END art_quick_throw_stack_overflow_from_code
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, rSELF # pass Thread::Current
+ la $t9, artThrowNoSuchMethodFromCode
+ jr $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp)
+ move $a2, $sp # pass $sp
+END art_quick_throw_no_such_method_from_code
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/$a1.
+ *
+ * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
+ * of the target Method* in $v0 and method->code_ in $v1.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving $ra
+ * pointing back to the original caller.
+ */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ lw $a2, 64($sp) # pass caller Method*
+ move $t0, $sp # save $sp
+ addiu $sp, $sp, -32 # make space for extra args
+ .cfi_adjust_cfa_offset 32
+ move $a3, rSELF # pass Thread::Current
+ .cfi_rel_offset 28, 12
+ jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
+ sw $t0, 16($sp) # pass $sp
+ addiu $sp, $sp, 32 # release out args
+ .cfi_adjust_cfa_offset -32
+ move $a0, $v0 # save target Method*
+ move $t9, $v1 # save $v0->code_
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ beqz $v0, 1f
+ nop
+ jr $t9
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
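
For reference, a C++ sketch of the packed 64-bit result contract described above (illustrative; assumes 32-bit pointers under the MIPS O32 ABI, where a uint64_t is returned in $v0/$v1):

    #include <cstdint>

    static inline uint64_t PackMethodAndCode(void* method, const void* code) {
      uint32_t lo = reinterpret_cast<uintptr_t>(method);  // Method* -> $v0
      uint32_t hi = reinterpret_cast<uintptr_t>(code);    // code_   -> $v1
      return (static_cast<uint64_t>(hi) << 32) | lo;      // 0 on failure
    }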
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
+ /*
+ * Common invocation stub for portable and quick.
+ * On entry:
+ * a0 = method pointer
+ * a1 = argument array or NULL for no argument methods
+ * a2 = size of argument array in bytes
+ * a3 = (managed) thread pointer
+ * [sp + 16] = JValue* result
+ * [sp + 20] = result type char
+ */
+ .type art_portable_invoke_stub, %function
+ .global art_portable_invoke_stub
+art_portable_invoke_stub:
+ENTRY art_quick_invoke_stub
+ GENERATE_GLOBAL_POINTER
+ sw $a0, 0($sp) # save out a0
+ addiu $sp, $sp, -16 # spill s0, s1, fp, ra
+ .cfi_adjust_cfa_offset 16
+ sw $ra, 12($sp)
+ .cfi_rel_offset 31, 12
+ sw $fp, 8($sp)
+ .cfi_rel_offset 30, 8
+ sw $s1, 4($sp)
+ .cfi_rel_offset 17, 4
+ sw $s0, 0($sp)
+ .cfi_rel_offset 16, 0
+ move $fp, $sp # save sp in fp
+ .cfi_def_cfa_register 30
+ move $s1, $a3 # move managed thread pointer into s1
+ addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
+ addiu $t0, $a2, 16 # create space for method pointer in frame
+ srl $t0, $t0, 3 # shift the frame size right 3
+ sll $t0, $t0, 3 # shift the frame size left 3 to align to 8 bytes
+ subu $sp, $sp, $t0 # reserve stack space for argument array
+ addiu $a0, $sp, 4 # pass $sp + 4 (past the Method* slot) as dest for memcpy
+ jal memcpy # (dest, src, bytes)
+ addiu $sp, $sp, -16 # make space for argument slots for memcpy
+ addiu $sp, $sp, 16 # restore stack after memcpy
+ lw $a0, 16($fp) # restore method*
+ lw $a1, 4($sp) # copy arg value for a1
+ lw $a2, 8($sp) # copy arg value for a2
+ lw $a3, 12($sp) # copy arg value for a3
+ lw $t9, METHOD_CODE_OFFSET($a0) # get pointer to the code
+ jalr $t9 # call the method
+ sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ move $sp, $fp # restore the stack
+ lw $s0, 0($sp)
+ lw $s1, 4($sp)
+ lw $fp, 8($sp)
+ lw $ra, 12($sp)
+ addiu $sp, $sp, 16
+ .cfi_adjust_cfa_offset -16
+ lw $t0, 16($sp) # get result pointer
+ lw $t1, 20($sp) # get result type char
+ li $t2, 68 # put char 'D' into t2
+ beq $t1, $t2, 1f # branch if result type char == 'D'
+ li $t3, 70 # put char 'F' into t3
+ beq $t1, $t3, 1f # branch if result type char == 'F'
+ sw $v0, 0($t0) # store the result
+ jr $ra
+ sw $v1, 4($t0) # store the other half of the result
+1:
+ s.s $f0, 0($t0) # store floating point result
+ jr $ra
+ s.s $f1, 4($t0) # store other half of floating point result
+END art_quick_invoke_stub
+ .size art_portable_invoke_stub, .-art_portable_invoke_stub
+
+ /*
+ * Entry point for native methods when JNI bug-compatibility mode is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ GENERATE_GLOBAL_POINTER
+ # save registers that may contain arguments, and $ra, which will be clobbered by the call
+ addiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 28($sp)
+ .cfi_rel_offset 31, 28
+ sw $a3, 24($sp)
+ .cfi_rel_offset 7, 24
+ sw $a2, 20($sp)
+ .cfi_rel_offset 6, 20
+ sw $a1, 16($sp)
+ .cfi_rel_offset 5, 16
+ sw $a0, 12($sp)
+ .cfi_rel_offset 4, 12
+ move $a0, rSELF # pass Thread::Current
+ jal artWorkAroundAppJniBugs # (Thread*, $sp)
+ move $a1, $sp # pass $sp
+ move $t9, $v0 # save target address
+ lw $a0, 12($sp)
+ lw $a1, 16($sp)
+ lw $a2, 20($sp)
+ lw $a3, 24($sp)
+ lw $ra, 28($sp)
+ jr $t9 # tail call into JNI routine
+ addiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+END art_quick_work_around_app_jni_bugs
+
+ /*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
+ * failure.
+ */
+ .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ move $a2, rSELF # pass Thread::Current
+ jal artHandleFillArrayDataFromCode # (Array*, const DexFile::Payload*, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_ZERO
+END art_quick_handle_fill_data_from_code
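+
+ /*
+ * Note on a pattern used by nearly every stub in this file: MIPS executes the instruction
+ * in the branch delay slot before the transfer of control, so a sequence such as
+ * jal artHandleFillArrayDataFromCode
+ * move $a3, $sp
+ * loads the fourth argument register before the call is actually taken.
+ */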
+
+ /*
+ * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+ */
+ .extern artLockObjectFromCode
+ENTRY art_quick_lock_object_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ move $a1, rSELF # pass Thread::Current
+ jal artLockObjectFromCode # (Object* obj, Thread*, $sp)
+ move $a2, $sp # pass $sp
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_lock_object_from_code
+
+ /*
+ * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+ */
+ .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ move $a1, rSELF # pass Thread::Current
+ jal artUnlockObjectFromCode # (Object* obj, Thread*, $sp)
+ move $a2, $sp # pass $sp
+ RETURN_IF_ZERO
+END art_quick_unlock_object_from_code
+
+ /*
+ * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ */
+ .extern artCheckCastFromCode
+ENTRY art_quick_check_cast_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ move $a2, rSELF # pass Thread::Current
+ jal artCheckCastFromCode # (Class* a, Class* b, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_ZERO
+END art_quick_check_cast_from_code
+
+ /*
+ * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
+ * failure.
+ */
+ .extern artCanPutArrayElementFromCode
+ENTRY art_quick_can_put_array_element_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ move $a2, rSELF # pass Thread::Current
+ jal artCanPutArrayElementFromCode # (Object* element, Class* array_class, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_ZERO
+END art_quick_can_put_array_element_from_code
+
+ /*
+ * Entry from managed code when static storage is uninitialized; this stub will run the
+ * class initializer and deliver an exception on error. On success the static storage base
+ * is returned.
+ */
+ .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ jal artInitializeStaticStorageFromCode
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_initialize_static_storage_from_code
+
+ /*
+ * Entry from managed code when the dex cache misses for a type_idx.
+ */
+ .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ jal artInitializeTypeFromCode
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_initialize_type_from_code
+
+ /*
+ * Entry from managed code when type_idx needs to be checked for access and the dex cache
+ * may also miss.
+ */
+ .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ # artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, $sp)
+ jal artInitializeTypeAndVerifyAccessFromCode
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_initialize_type_and_verify_access_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load a 32-bit primitive value.
+ */
+ .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a1, 64($sp) # pass referrer's Method*
+ move $a2, rSELF # pass Thread::Current
+ jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load a 64-bit primitive value.
+ */
+ .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a1, 64($sp) # pass referrer's Method*
+ move $a2, rSELF # pass Thread::Current
+ jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and load an object reference.
+ */
+ .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a1, 64($sp) # pass referrer's Method*
+ move $a2, rSELF # pass Thread::Current
+ jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_static_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+ */
+ .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a2, 64($sp) # pass referrer's Method*
+ move $a3, rSELF # pass Thread::Current
+ jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+ */
+ .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a2, 64($sp) # pass referrer's Method*
+ move $a3, rSELF # pass Thread::Current
+ jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and load an object reference.
+ */
+ .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a2, 64($sp) # pass referrer's Method*
+ move $a3, rSELF # pass Thread::Current
+ jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_instance_from_code
+
+ /*
+ * Called by managed code to resolve a static field and store a 32-bit primitive value.
+ */
+ .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a2, 64($sp) # pass referrer's Method*
+ move $a3, rSELF # pass Thread::Current
+ jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set32_static_from_code
+
+ /*
+ * Called by managed code to resolve a static field and store a 64-bit primitive value.
+ */
+ .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a1, 64($sp) # pass referrer's Method*
+ sw rSELF, 16($sp) # pass Thread::Current
+ jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*, $sp)
+ sw $sp, 20($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set64_static_from_code
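+
+ /*
+ * Argument placement for the call above, following the MIPS o32 convention used throughout
+ * this file (a sketch): the first four argument words travel in $a0-$a3 and the rest on the
+ * stack from 16($sp). The 64-bit new_val needs the aligned register pair $a2/$a3, so
+ * Thread* and $sp spill to the stack:
+ * artSet64StaticFromCode(field_idx, // $a0
+ * referrer, // $a1
+ * new_val, // $a2:$a3
+ * self, // 16($sp)
+ * sp) // 20($sp)
+ */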
+
+ /*
+ * Called by managed code to resolve a static field and store an object reference.
+ */
+ .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a2, 64($sp) # pass referrer's Method*
+ move $a3, rSELF # pass Thread::Current
+ jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*, $sp)
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set_obj_static_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+ */
+ .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a3, 64($sp) # pass referrer's Method*
+ sw rSELF, 16($sp) # pass Thread::Current
+ jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
+ sw $sp, 20($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set32_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ */
+ .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ sw rSELF, 16($sp) # pass Thread::Current
+ jal artSet64InstanceFromCode # (field_idx, Object*, new_val, Thread*, $sp)
+ sw $sp, 20($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set64_instance_from_code
+
+ /*
+ * Called by managed code to resolve an instance field and store an object reference.
+ */
+ .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lw $a3, 64($sp) # pass referrer's Method*
+ sw rSELF, 16($sp) # pass Thread::Current
+ jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp)
+ sw $sp, 20($sp) # pass $sp
+ RETURN_IF_ZERO
+END art_quick_set_obj_instance_from_code
+
+ /*
+ * Entry from managed code to resolve a string; this stub will allocate a String and deliver
+ * an exception on error. On success the String is returned. $a0 holds the referring method,
+ * $a1 holds the string index. The fast-path check for a hit in the strings cache has already
+ * been performed.
+ */
+ .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
+ jal artResolveStringFromCode
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_resolve_string_from_code
+
+ /*
+ * Called by managed code to allocate an object.
+ */
+ .extern artAllocObjectFromCode
+ENTRY art_quick_alloc_object_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ jal artAllocObjectFromCode # (uint32_t type_idx, Method* method, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_object_from_code
+
+ /*
+ * Called by managed code to allocate an object when the caller doesn't know whether it has
+ * access to the created type.
+ */
+ .extern artAllocObjectFromCodeWithAccessCheck
+ENTRY art_quick_alloc_object_from_code_with_access_check
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, rSELF # pass Thread::Current
+ jal artAllocObjectFromCodeWithAccessCheck # (uint32_t type_idx, Method* method, Thread*, $sp)
+ move $a3, $sp # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_object_from_code_with_access_check
+
+ /*
+ * Called by managed code to allocate an array.
+ */
+ .extern artAllocArrayFromCode
+ENTRY art_quick_alloc_array_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, $sp)
+ jal artAllocArrayFromCode
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_array_from_code
+
+ /*
+ * Called by managed code to allocate an array when the caller doesn't know whether it has
+ * access to the created type.
+ */
+ .extern artAllocArrayFromCodeWithAccessCheck
+ENTRY art_quick_alloc_array_from_code_with_access_check
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, $sp)
+ jal artAllocArrayFromCodeWithAccessCheck
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_alloc_array_from_code_with_access_check
+
+ /*
+ * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+ */
+ .extern artCheckAndAllocArrayFromCode
+ENTRY art_quick_check_and_alloc_array_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , $sp)
+ jal artCheckAndAllocArrayFromCode
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_check_and_alloc_array_from_code
+
+ /*
+ * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+ */
+ .extern artCheckAndAllocArrayFromCodeWithAccessCheck
+ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a3, rSELF # pass Thread::Current
+ # artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , $sp)
+ jal artCheckAndAllocArrayFromCodeWithAccessCheck
+ sw $sp, 16($sp) # pass $sp
+ RETURN_IF_NONZERO
+END art_quick_check_and_alloc_array_from_code_with_access_check
+
+ /*
+ * Called by managed code when the value in rSUSPEND has been decremented to 0.
+ */
+ .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+ GENERATE_GLOBAL_POINTER
+ lh $a0, THREAD_FLAGS_OFFSET(rSELF)
+ bnez $a0, 1f
+ addi rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ jr $ra
+ nop
+1:
+ move $a0, rSELF
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ jal artTestSuspendFromCode # (Thread*, $sp)
+ move $a1, $sp
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
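+
+ /*
+ * Rough C++ equivalent of the fast path above (a sketch; the flags field name is assumed,
+ * standing in for the halfword at THREAD_FLAGS_OFFSET):
+ * rSUSPEND = SUSPEND_CHECK_INTERVAL; // delay slot: countdown is reset on both paths
+ * if (self->flags_ != 0) { // suspend or checkpoint requested
+ * artTestSuspendFromCode(self, sp); // slow path, may block
+ * }
+ */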
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ sw $a0, 0($sp) # place proxy method at bottom of frame
+ move $a2, rSELF # pass Thread::Current
+ jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
+ move $a3, $sp # pass $sp
+ lw $ra, 60($sp) # restore $ra
+ jr $ra
+ addiu $sp, $sp, 64 # pop frame
+ .cfi_adjust_cfa_offset -64
+END art_portable_proxy_invoke_handler
+
+ /*
+ * Called by managed code that is attempting to call a method on a proxy class. On entry
+ * $a0 holds the proxy method; $a1, $a2 and $a3 may contain arguments.
+ */
+ .extern artQuickProxyInvokeHandler
+ENTRY art_quick_proxy_invoke_handler
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ sw $a0, 0($sp) # place proxy method at bottom of frame
+ move $a2, rSELF # pass Thread::Current
+ jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
+ move $a3, $sp # pass $sp
+ lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ lw $gp, 52($sp) # restore $gp
+ lw $ra, 60($sp) # restore $ra
+ bnez $t0, 1f
+ addiu $sp, $sp, 64 # pop frame
+ .cfi_adjust_cfa_offset -64
+ jr $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_proxy_invoke_handler
+
+ .extern artInterpreterEntry
+ENTRY art_quick_interpreter_entry
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ sw $a0, 0($sp) # place proxy method at bottom of frame
+ move $a1, rSELF # pass Thread::Current
+ jal artInterpreterEntry # (Method* method, Thread*, SP)
+ move $a2, $sp # pass $sp
+ lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ lw $gp, 52($sp) # restore $gp
+ lw $ra, 60($sp) # restore $ra
+ bnez $t0, 1f
+ addiu $sp, $sp, 64 # pop frame
+ .cfi_adjust_cfa_offset -64
+ jr $ra
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_interpreter_entry
+
+ /*
+ * Routine that intercepts method calls and returns.
+ */
+ .extern artInstrumentationMethodEntryFromCode
+ .extern artInstrumentationMethodExitFromCode
+ENTRY art_quick_instrumentation_entry_from_code
+ GENERATE_GLOBAL_POINTER
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ move $t0, $sp # remember bottom of caller's frame
+ addiu $sp, $sp, -32 # space for saved $a0 (1 word), pad (2 words), outgoing args incl. LR (5 words)
+ .cfi_adjust_cfa_offset 32
+ sw $a0, 28($sp) # save arg0
+ sw $ra, 16($sp) # pass $ra
+ move $a3, $t0 # pass $sp
+ jal artInstrumentationMethodEntryFromCode # (Method*, Object*, Thread*, SP, LR)
+ move $a2, rSELF # pass Thread::Current
+ move $t9, $v0 # $t9 holds reference to code
+ lw $a0, 28($sp) # restore arg0
+ addiu $sp, $sp, 32 # remove args
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ jalr $t9 # call method
+ nop
+END art_quick_instrumentation_entry_from_code
+ /* intentional fallthrough */
+ .global art_quick_instrumentation_exit_from_code
+art_quick_instrumentation_exit_from_code:
+ .cfi_startproc
+ addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp
+ GENERATE_GLOBAL_POINTER
+ move $t0, $sp # remember bottom of caller's frame
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ addiu $sp, $sp, -48 # save return values and set up args
+ .cfi_adjust_cfa_offset 48
+ sw $v0, 32($sp)
+ .cfi_rel_offset 2, 32
+ sw $v1, 36($sp)
+ .cfi_rel_offset 3, 36
+ s.s $f0, 40($sp)
+ s.s $f1, 44($sp)
+ s.s $f0, 16($sp) # pass fpr result
+ s.s $f1, 20($sp)
+ move $a2, $v0 # pass gpr result
+ move $a3, $v1
+ move $a1, $t0 # pass $sp
+ jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
+ move $a0, rSELF # pass Thread::Current
+ move $t0, $v0 # set aside returned link register
+ move $ra, $v1 # set link register for deoptimization
+ lw $v0, 32($sp) # restore return values
+ lw $v1, 36($sp)
+ l.s $f0, 40($sp)
+ l.s $f1, 44($sp)
+ jr $t0 # return
+ addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame
+ .cfi_adjust_cfa_offset -112
+END art_quick_instrumentation_exit_from_code
+
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimize
+ .extern artEnterInterpreterFromDeoptimize
+ENTRY art_quick_deoptimize
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a0, rSELF # pass Thread::Current
+ jal artDeoptimize # artDeoptimize(Thread*, SP)
+ # does not return; long jumps into the interpreter
+ move $a1, $sp # pass $sp
+END art_quick_deoptimize
+
+ /*
+ * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
+ */
+ .extern artThrowAbstractMethodErrorFromCode
+ENTRY art_portable_abstract_method_error_stub
+ GENERATE_GLOBAL_POINTER
+ la $t9, artThrowAbstractMethodErrorFromCode
+ jr $t9 # (Method*, Thread*, SP)
+ move $a1, $s1 # pass Thread::Current
+END art_portable_abstract_method_error_stub
+
+ /*
+ * Quick abstract method error stub. $a0 contains method* on entry.
+ */
+ENTRY art_quick_abstract_method_error_stub
+ GENERATE_GLOBAL_POINTER
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, $s1 # pass Thread::Current
+ la $t9, artThrowAbstractMethodErrorFromCode
+ jr $t9 # (Method*, Thread*, SP)
+ move $a2, $sp # pass SP
+END art_quick_abstract_method_error_stub
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ GENERATE_GLOBAL_POINTER
+ addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 16($sp)
+ .cfi_rel_offset 31, 16
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ sw $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+ jal artFindNativeMethod # (Thread*)
+ move $a0, $s1 # pass Thread::Current()
+ lw $a0, 0($sp) # restore registers from stack
+ lw $a1, 4($sp)
+ lw $a2, 8($sp)
+ lw $a3, 12($sp)
+ lw $ra, 16($sp)
+ beq $v0, $zero, no_native_code_found
+ addiu $sp, $sp, 32 # restore the stack
+ .cfi_adjust_cfa_offset -32
+ move $t9, $v0 # put method code result in $t9
+ jr $t9 # leaf call to method's code
+ nop
+no_native_code_found:
+ jr $ra
+ nop
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * $a0: low word
+ * $a1: high word
+ * $a2: shift count
+ */
+ENTRY art_quick_shl_long
+ /* shl-long vAA, vBB, vCC */
+ sll $v0, $a0, $a2 # rlo<- alo << (shift&31)
+ not $v1, $a2 # rhi<- 31-shift (shift is 5b)
+ srl $a0, 1
+ srl $a0, $v1 # alo<- alo >> (32-(shift&31))
+ sll $v1, $a1, $a2 # rhi<- ahi << (shift&31)
+ or $v1, $a0 # rhi<- rhi | alo
+ andi $a2, 0x20 # shift <- shift & 0x20
+ movn $v1, $v0, $a2 # rhi<- rlo (if shift&0x20)
+ jr $ra
+ movn $v0, $zero, $a2 # rlo<- 0 (if shift&0x20)
+END art_quick_shl_long
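+
+ /*
+ * What the sequence above computes, as C++ (a sketch; note how splitting the right shift
+ * into (alo >> 1) >> (31 - n) avoids the undefined 32-bit shift-by-32 when n == 0):
+ * uint32_t n = shift & 31;
+ * uint32_t rlo = alo << n;
+ * uint32_t rhi = (ahi << n) | ((alo >> 1) >> (31 - n));
+ * if (shift & 0x20) { rhi = rlo; rlo = 0; } // shift distances 32..63
+ */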
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * $a0: low word
+ * $a1: high word
+ * $a2: shift count
+ */
+ .global art_quick_shr_long
+ENTRY art_quick_shr_long
+ sra $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
+ srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
+ sra $a3, $a1, 31 # $a3<- sign(ah)
+ not $a0, $a2 # alo<- 31-shift (shift is 5b)
+ sll $a1, 1
+ sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
+ or $v0, $a1 # rlo<- rlo | ahi
+ andi $a2, 0x20 # shift & 0x20
+ movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20)
+ jr $ra
+ movn $v1, $a3, $a2 # rhi<- sign(ahi) (if shift&0x20)
+END art_quick_shr_long
+
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ * On entry:
+ * $a0: low word
+ * $a1: high word
+ * $a2: shift count
+ */
+ /* ushr-long vAA, vBB, vCC */
+ .global art_quick_ushr_long
+ENTRY art_quick_ushr_long
+ srl $v1, $a1, $a2 # rhi<- ahi >> (shift&31)
+ srl $v0, $a0, $a2 # rlo<- alo >> (shift&31)
+ not $a0, $a2 # alo<- 31-shift (shift is 5b)
+ sll $a1, 1
+ sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
+ or $v0, $a1 # rlo<- rlo | ahi
+ andi $a2, 0x20 # shift & 0x20
+ movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20)
+ jr $ra
+ movn $v1, $zero, $a2 # rhi<- 0 (if shift&0x20)
+END art_quick_ushr_long
+
+ENTRY art_quick_indexof
+ jr $ra
+ nop
+END art_quick_indexof
+
+ENTRY art_quick_string_compareto
+ jr $ra
+ nop
+END art_quick_string_compareto
diff --git a/runtime/oat/runtime/oat_support_entrypoints.h b/runtime/oat/runtime/oat_support_entrypoints.h
new file mode 100644
index 0000000..c1a2587
--- /dev/null
+++ b/runtime/oat/runtime/oat_support_entrypoints.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+#define ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+
+#include "dex_file-inl.h"
+#include "runtime.h"
+
+#define ENTRYPOINT_OFFSET(x) \
+ (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, entrypoints_)) + \
+ static_cast<uintptr_t>(OFFSETOF_MEMBER(EntryPoints, x)))
+
+namespace art {
+namespace mirror {
+class AbstractMethod;
+class Class;
+class Object;
+} // namespace mirror
+class DvmDex;
+class MethodHelper;
+class ShadowFrame;
+class Thread;
+
+struct PACKED(4) EntryPoints {
+ // Alloc
+ void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
+ void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+ void* (*pAllocObjectFromCode)(uint32_t, void*);
+ void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*);
+ void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
+ void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+
+ // Cast
+ uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*);
+ void (*pCanPutArrayElementFromCode)(void*, void*);
+ void (*pCheckCastFromCode)(void*, void*);
+
+ // DexCache
+ void* (*pInitializeStaticStorage)(uint32_t, void*);
+ void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*);
+ void* (*pInitializeTypeFromCode)(uint32_t, void*);
+ void* (*pResolveStringFromCode)(void*, uint32_t);
+
+ // Field
+ int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src
+ int (*pSet32Static)(uint32_t, int32_t);
+ int (*pSet64Instance)(uint32_t, void*, int64_t);
+ int (*pSet64Static)(uint32_t, int64_t);
+ int (*pSetObjInstance)(uint32_t, void*, void*);
+ int (*pSetObjStatic)(uint32_t, void*);
+ int32_t (*pGet32Instance)(uint32_t, void*);
+ int32_t (*pGet32Static)(uint32_t);
+ int64_t (*pGet64Instance)(uint32_t, void*);
+ int64_t (*pGet64Static)(uint32_t);
+ void* (*pGetObjInstance)(uint32_t, void*);
+ void* (*pGetObjStatic)(uint32_t);
+
+ // FillArray
+ void (*pHandleFillArrayDataFromCode)(void*, void*);
+
+ // JNI
+ uint32_t (*pJniMethodStart)(Thread*);
+ uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
+ void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
+ void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
+ mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
+ mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
+ jobject locked, Thread* self);
+
+ // Locks
+ void (*pLockObjectFromCode)(void*);
+ void (*pUnlockObjectFromCode)(void*);
+
+ // Math
+ int32_t (*pCmpgDouble)(double, double);
+ int32_t (*pCmpgFloat)(float, float);
+ int32_t (*pCmplDouble)(double, double);
+ int32_t (*pCmplFloat)(float, float);
+ double (*pFmod)(double, double);
+ double (*pSqrt)(double);
+ double (*pL2d)(int64_t);
+ float (*pFmodf)(float, float);
+ float (*pL2f)(int64_t);
+ int32_t (*pD2iz)(double);
+ int32_t (*pF2iz)(float);
+ int32_t (*pIdivmod)(int32_t, int32_t);
+ int64_t (*pD2l)(double);
+ int64_t (*pF2l)(float);
+ int64_t (*pLdiv)(int64_t, int64_t);
+ int64_t (*pLdivmod)(int64_t, int64_t);
+ int64_t (*pLmul)(int64_t, int64_t);
+ uint64_t (*pShlLong)(uint64_t, uint32_t);
+ uint64_t (*pShrLong)(uint64_t, uint32_t);
+ uint64_t (*pUshrLong)(uint64_t, uint32_t);
+
+ // Interpreter
+ void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+ void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+ // Intrinsics
+ int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
+ int32_t (*pMemcmp16)(void*, void*, int32_t);
+ int32_t (*pStringCompareTo)(void*, void*);
+ void* (*pMemcpy)(void*, const void*, size_t);
+
+ // Invocation
+ const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
+ const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
+ void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
+ void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
+ void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
+ void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
+ void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
+ void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
+
+ // Thread
+ void (*pCheckSuspendFromCode)(Thread*); // Stub that is called when the suspend count is non-zero
+ void (*pTestSuspendFromCode)(); // Stub that is periodically called to test the suspend count
+
+ // Throws
+ void (*pDeliverException)(void*);
+ void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
+ void (*pThrowDivZeroFromCode)();
+ void (*pThrowNoSuchMethodFromCode)(int32_t);
+ void (*pThrowNullPointerFromCode)();
+ void (*pThrowStackOverflowFromCode)(void*);
+};
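+
+// Usage sketch (hypothetical call site, not part of this patch): compiled code reaches an
+// entrypoint by loading a function pointer out of the current Thread at a constant offset,
+// e.g. for pMemcpy:
+//
+// uintptr_t offset = ENTRYPOINT_OFFSET(pMemcpy); // Thread::entrypoints_ + member offset
+//
+// The architecture-specific stubs then load [thread_register + offset] and jump through it,
+// as the MIPS stubs in runtime_support_mips.S do via rSELF.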
+
+
+// JNI entrypoints.
+extern uint32_t JniMethodStart(Thread* self)
+ UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self)
+ UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+
+extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked, Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+
+// Initialize an entry point data structure.
+void InitEntryPoints(EntryPoints* points);
+
+// Change the debugger entry point in the data structure.
+void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled);
+
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
diff --git a/runtime/oat/runtime/support_alloc.cc b/runtime/oat/runtime/support_alloc.cc
new file mode 100644
index 0000000..f66fc84
--- /dev/null
+++ b/runtime/oat/runtime/support_alloc.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "mirror/class-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "runtime_support.h"
+
+namespace art {
+
+extern "C" mirror::Object* artAllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocObjectFromCode(type_idx, method, self, false);
+}
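+
+// A sketch of what FinishCalleeSaveFrameSetup does, inferred from the hand-inlined copies in
+// support_field.cc and support_instrumentation.cc in this patch (not necessarily its exact
+// body):
+// *sp = Runtime::Current()->GetCalleeSaveMethod(type); // install the callee-save method*
+// self->SetTopOfStack(sp, 0); // make the frame visible to stack walks and GC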
+
+extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx,
+ mirror::AbstractMethod* method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocObjectFromCode(type_idx, method, self, true);
+}
+
+extern "C" mirror::Array* artAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method,
+ int32_t component_count, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocArrayFromCode(type_idx, method, component_count, self, false);
+}
+
+extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx,
+ mirror::AbstractMethod* method,
+ int32_t component_count,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return AllocArrayFromCode(type_idx, method, component_count, self, true);
+}
+
+extern "C" mirror::Array* artCheckAndAllocArrayFromCode(uint32_t type_idx,
+ mirror::AbstractMethod* method,
+ int32_t component_count, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false);
+}
+
+extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx,
+ mirror::AbstractMethod* method,
+ int32_t component_count,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true);
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_cast.cc b/runtime/oat/runtime/support_cast.cc
new file mode 100644
index 0000000..fe91e61
--- /dev/null
+++ b/runtime/oat/runtime/support_cast.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "runtime_support.h"
+
+namespace art {
+
+// Assignable test for code; won't throw. Null and equality tests have already been performed.
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(klass != NULL);
+ DCHECK(ref_class != NULL);
+ return klass->IsAssignableFrom(ref_class) ? 1 : 0;
+}
+
+// Check whether it is safe to cast one class to the other; throws an exception and returns -1 on failure.
+extern "C" int artCheckCastFromCode(mirror::Class* src_type, mirror::Class* dest_type,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(src_type->IsClass()) << PrettyClass(src_type);
+ DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
+ if (LIKELY(dest_type->IsAssignableFrom(src_type))) {
+ return 0; // Success
+ } else {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ThrowClassCastException(dest_type, src_type);
+ return -1; // Failure
+ }
+}
+
+// Tests whether 'element' can be assigned into an array of type 'array_class'.
+// Returns 0 on success and -1 if an exception is pending.
+extern "C" int artCanPutArrayElementFromCode(const mirror::Object* element,
+ const mirror::Class* array_class,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(array_class != NULL);
+ // element can't be NULL, as that case is screened out in runtime_support
+ mirror::Class* element_class = element->GetClass();
+ mirror::Class* component_type = array_class->GetComponentType();
+ if (LIKELY(component_type->IsAssignableFrom(element_class))) {
+ return 0; // Success
+ } else {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ ThrowArrayStoreException(element_class, array_class);
+ return -1; // Failure
+ }
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_deoptimize.cc b/runtime/oat/runtime/support_deoptimize.cc
new file mode 100644
index 0000000..43fc9d2
--- /dev/null
+++ b/runtime/oat/runtime/support_deoptimize.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_file-inl.h"
+#include "interpreter/interpreter.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "stack.h"
+#include "thread.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+extern "C" void artDeoptimize(Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ self->QuickDeliverException();
+}
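+
+// Note: the -1 cast above appears to be the sentinel the deoptimization path keys on; per the
+// comment on art_quick_deoptimize in the MIPS assembly in this patch, exception delivery long
+// jumps into the interpreter upcall when it sees this "special exception of -1".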
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_dexcache.cc b/runtime/oat/runtime/support_dexcache.cc
new file mode 100644
index 0000000..0af7a62
--- /dev/null
+++ b/runtime/oat/runtime/support_dexcache.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "gc/accounting/card_table-inl.h"
+#include "class_linker-inl.h"
+#include "dex_file-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "runtime_support.h"
+
+namespace art {
+
+extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Called to ensure the static storage base is initialized for direct static field reads and
+ // writes. A class may access another class's fields when it lacks direct access, because
+ // access has been granted by inheritance.
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, referrer, self, true, false);
+}
+
+extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Called when method->dex_cache_resolved_types_[] misses.
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, referrer, self, false, false);
+}
+
+extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Called when caller isn't guaranteed to have access to a type and the dex cache may be
+ // unpopulated.
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, referrer, self, false, true);
+}
+
+extern "C" mirror::String* artResolveStringFromCode(mirror::AbstractMethod* referrer,
+ int32_t string_idx,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ return ResolveStringFromCode(referrer, string_idx);
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_field.cc b/runtime/oat/runtime/support_field.cc
new file mode 100644
index 0000000..c20326c
--- /dev/null
+++ b/runtime/oat/runtime/support_field.cc
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_file-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "runtime_support.h"
+
+#include <stdint.h>
+
+namespace art {
+
+extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int32_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ return 0; // An exception is pending; the caller detects it via Thread::Current.
+}
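+
+// The getters and setters below all follow the two-phase pattern above: FindFieldFast is the
+// lock-free dex-cache probe; only on a miss do we finish the callee-save frame (making the
+// stack walkable for GC and exception delivery) and fall back to FindFieldFromCode, which may
+// resolve, initialize and throw.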
+
+extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int64_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ return 0; // An exception is pending; the caller detects it via Thread::Current.
+}
+
+extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ return NULL; // An exception is pending; the caller detects it via Thread::Current.
+}
+
+extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ return field->Get32(obj);
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int32_t), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ } else {
+ return field->Get32(obj);
+ }
+ }
+ return 0; // An exception is pending; the caller detects it via Thread::Current.
+}
+
+extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ return field->Get64(obj);
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveRead, sizeof(int64_t), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ } else {
+ return field->Get64(obj);
+ }
+ }
+ return 0; // An exception is pending; the caller detects it via Thread::Current.
+}
+
+extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+ const mirror::AbstractMethod* referrer,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::Object*));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ return field->GetObj(obj);
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ } else {
+ return field->GetObj(obj);
+ }
+ }
+ return NULL; // An exception is pending; the caller detects it via Thread::Current.
+}
+
+extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int32_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet64StaticFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer,
+ uint64_t new_value, Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int64_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ if (LIKELY(!FieldHelper(field).IsPrimitiveType())) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0; // success
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ field->Set32(obj, new_value);
+ return 0; // success
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int32_t), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ } else {
+ field->Set32(obj, new_value);
+ return 0; // success
+ }
+ }
+ return -1; // failure
+}
+
+extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly);
+ mirror::AbstractMethod* referrer =
+ sp[callee_save->GetFrameSizeInBytes() / sizeof(mirror::AbstractMethod*)];
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+ sizeof(int64_t));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ field->Set64(obj, new_value);
+ return 0; // success
+ }
+ *sp = callee_save;
+ self->SetTopOfStack(sp, 0);
+ field = FindFieldFromCode(field_idx, referrer, self, InstancePrimitiveWrite, sizeof(int64_t), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ } else {
+ field->Set64(obj, new_value);
+ return 0; // success
+ }
+ }
+ return -1; // failure
+}
+
+extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+ mirror::Object* new_value,
+ const mirror::AbstractMethod* referrer, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL && obj != NULL)) {
+ field->SetObj(obj, new_value);
+ return 0; // success
+ }
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ field = FindFieldFromCode(field_idx, referrer, self, InstanceObjectWrite,
+ sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ if (UNLIKELY(obj == NULL)) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ } else {
+ field->SetObj(obj, new_value);
+ return 0; // success
+ }
+ }
+ return -1; // failure
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_fillarray.cc b/runtime/oat/runtime/support_fillarray.cc
new file mode 100644
index 0000000..a0b06fb
--- /dev/null
+++ b/runtime/oat/runtime/support_fillarray.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "common_throws.h"
+#include "dex_instruction.h"
+#include "mirror/array.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+/*
+ * Fill the array with predefined constant values, throwing exceptions if the array is null or
+ * not of sufficient length.
+ *
+ * NOTE: When dealing with a raw dex file, the data to be copied uses
+ * little-endian ordering. Require that dex2oat do any required swapping
+ * so this routine can get by with a memcpy().
+ *
+ * Format of the data:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ */
+extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array,
+ const Instruction::ArrayDataPayload* payload,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ return -1; // Error
+ }
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count);
+ return -1; // Error
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ return 0; // Success
+}
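+
+/*
+ * Worked example of the payload layout (illustrative bytes, little-endian): for a
+ * fill-array-data over `new int[] {1, 2, 3}` the embedded table would carry
+ * ident = 0x0300, width = 4, size = 3,
+ * data = 01 00 00 00 02 00 00 00 03 00 00 00
+ * so size_in_bytes above is 3 * 4 = 12 and the memcpy copies 12 bytes into the array.
+ */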
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_instrumentation.cc b/runtime/oat/runtime/support_instrumentation.cc
new file mode 100644
index 0000000..1f1b952
--- /dev/null
+++ b/runtime/oat/runtime/support_instrumentation.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "instrumentation.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "runtime.h"
+#include "thread-inl.h"
+
+namespace art {
+
+extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMethod* method,
+ mirror::Object* this_object,
+ Thread* self,
+ mirror::AbstractMethod** sp,
+ uintptr_t lr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ const void* result = instrumentation->GetQuickCodeFor(method);
+ bool interpreter_entry = (result == GetInterpreterEntryPoint());
+ instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
+ method, lr, interpreter_entry);
+ CHECK(result != NULL) << PrettyMethod(method);
+ return result;
+}
+
+extern "C" uint64_t artInstrumentationMethodExitFromCode(Thread* self, mirror::AbstractMethod** sp,
+ uint64_t gpr_result, uint64_t fpr_result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // TODO: use FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly) rather than the
+  // hand-inlined version below. The hand-inlined version ensures the return_pc is assigned
+  // before the stack is verified.
+  // Be aware that the store below may well stomp on an incoming argument.
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ mirror::AbstractMethod* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly);
+ *sp = callee_save;
+ uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) +
+ callee_save->GetReturnPcOffsetInBytes());
+ CHECK(*return_pc == 0);
+ self->SetTopOfStack(sp, 0);
+ self->VerifyStack();
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ uint64_t return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(self, return_pc,
+ gpr_result,
+ fpr_result);
+ self->VerifyStack();
+ return return_or_deoptimize_pc;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_interpreter.cc b/runtime/oat/runtime/support_interpreter.cc
new file mode 100644
index 0000000..55be54f
--- /dev/null
+++ b/runtime/oat/runtime/support_interpreter.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "argument_visitor.h"
+#include "callee_save_frame.h"
+#include "dex_file-inl.h"
+#include "interpreter/interpreter.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+
+namespace art {
+
+// Visits arguments on the stack, placing them into the shadow frame.
+class BuildShadowFrameVisitor : public QuickArgumentVisitor {
+ public:
+ BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ShadowFrame& sf, size_t first_arg_reg) :
+ QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ if (IsSplitLongOrDouble()) {
+ sf_.SetVRegLong(cur_reg_, ReadSplitLongParam());
+ } else {
+ sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
+ }
+ ++cur_reg_;
+ break;
+ case Primitive::kPrimNot:
+ sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ break;
+ }
+ ++cur_reg_;
+ }
+
+ private:
+ ShadowFrame& sf_;
+ size_t cur_reg_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor);
+};
+
+extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in the shadow
+ // frame.
+ const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ uint16_t num_regs = code_item->registers_size_;
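+  // Build a shadow frame on the native stack that is large enough for all of the method's dex
+  // registers; the incoming arguments occupy the last ins_size_ registers.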
+ void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+ ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick.
+ method, 0, memory));
+ size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
+ BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg);
+ shadow_frame_builder.VisitArguments();
+  // Push a transition back into managed code onto the linked list in the thread.
+ ManagedStack fragment;
+ self->PushManagedStackFragment(&fragment);
+ self->PushShadowFrame(shadow_frame);
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
+ // Ensure static method's class is initialized.
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
+ true, true)) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ self->PopManagedStackFragment(fragment);
+ return 0;
+ }
+ }
+
+ JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
+ // Pop transition.
+ self->PopManagedStackFragment(fragment);
+ return result.GetJ();
+}
+
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* method = shadow_frame->GetMethod();
+ uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArray(shadow_frame, arg_offset);
+ method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_invoke.cc b/runtime/oat/runtime/support_invoke.cc
new file mode 100644
index 0000000..6a95f3c
--- /dev/null
+++ b/runtime/oat/runtime/support_invoke.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_instruction-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "runtime_support.h"
+
+namespace art {
+
+// Determine the target of an interface dispatch. The this_object argument is known to be non-null.
+extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::AbstractMethod* interface_method,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* method;
+ if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex16)) {
+ method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
+ if (UNLIKELY(method == NULL)) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
+ caller_method);
+ return 0; // Failure.
+ }
+ } else {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
+ // Determine method index from calling dex instruction.
+#if defined(__arm__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | LR |
+ // | ... | callee saves
+ // | R3 | arg3
+ // | R2 | arg2
+ // | R1 | arg1
+ // | R0 |
+ // | Method* | <- sp
+ DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
+ uintptr_t caller_pc = regs[10];
+#elif defined(__i386__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | Return |
+ // | EBP,ESI,EDI | callee saves
+ // | EBX | arg3
+ // | EDX | arg2
+ // | ECX | arg1
+ // | EAX/Method* | <- sp
+ DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uintptr_t caller_pc = regs[7];
+#elif defined(__mips__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | RA |
+ // | ... | callee saves
+ // | A3 | arg3
+ // | A2 | arg2
+ // | A1 | arg1
+ // | A0/Method* | <- sp
+ DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uintptr_t caller_pc = regs[15];
+#else
+ UNIMPLEMENTED(FATAL);
+ uintptr_t caller_pc = 0;
+#endif
+ uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
+ const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
+ instr_code == Instruction::INVOKE_INTERFACE_RANGE)
+ << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
+ uint32_t dex_method_idx;
+ if (instr_code == Instruction::INVOKE_INTERFACE) {
+ dex_method_idx = instr->VRegB_35c();
+ } else {
+ DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
+ dex_method_idx = instr->VRegB_3rc();
+ }
+ method = FindMethodFromCode(dex_method_idx, this_object, caller_method, self,
+ false, kInterface);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return 0; // Failure.
+ }
+ }
+ const void* code = method->GetEntryPointFromCompiledCode();
+
+#ifndef NDEBUG
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ if (UNLIKELY(code == NULL)) {
+ MethodHelper mh(method);
+ LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
+ << " location: " << mh.GetDexFile().GetLocation();
+ }
+#endif
+
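+  // Pack the two pointers into a single uint64_t: the method in the low word and the code
+  // address in the high word, so the assembly stub can pick them up from the 32-bit return
+  // registers (e.g. r0/r1 on ARM) and branch to the code.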
+ uint32_t method_uint = reinterpret_cast<uint32_t>(method);
+ uint64_t code_uint = reinterpret_cast<uint32_t>(code);
+ uint64_t result = ((code_uint << 32) | method_uint);
+ return result;
+}
+
+static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self, mirror::AbstractMethod** sp, bool access_check,
+ InvokeType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* method = FindMethodFast(method_idx, this_object, caller_method,
+ access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return 0; // failure
+ }
+ }
+ DCHECK(!self->IsExceptionPending());
+ const void* code = method->GetEntryPointFromCompiledCode();
+
+#ifndef NDEBUG
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ if (UNLIKELY(code == NULL)) {
+ MethodHelper mh(method);
+ LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
+ << " location: " << mh.GetDexFile().GetLocation();
+ }
+#endif
+
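+  // As above: method in the low word, code address to branch to in the high word.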
+ uint32_t method_uint = reinterpret_cast<uint32_t>(method);
+ uint64_t code_uint = reinterpret_cast<uint32_t>(code);
+ uint64_t result = ((code_uint << 32) | method_uint);
+ return result;
+}
+
+// See comments in the architecture-specific runtime_support_*.S files.
+extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface);
+}
+
+extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect);
+}
+
+extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic);
+}
+
+extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper);
+}
+
+extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual);
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_jni.cc b/runtime/oat/runtime/support_jni.cc
new file mode 100644
index 0000000..8f0f7ca
--- /dev/null
+++ b/runtime/oat/runtime/support_jni.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "runtime_support.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+// Called on entry to JNI: transition out of Runnable and release our share of the mutator_lock_.
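+// The returned cookie is the previous local reference segment state; the JniMethodEnd
+// variants restore it, releasing any local references created while in native code.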
+extern uint32_t JniMethodStart(Thread* self) {
+ JNIEnvExt* env = self->GetJniEnv();
+ DCHECK(env != NULL);
+ uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ self->TransitionFromRunnableToSuspended(kNative);
+ return saved_local_ref_cookie;
+}
+
+extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) {
+ self->DecodeJObject(to_lock)->MonitorEnter(self);
+ return JniMethodStart(self);
+}
+
+static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
+ JNIEnvExt* env = self->GetJniEnv();
+ env->locals.SetSegmentState(env->local_ref_cookie);
+ env->local_ref_cookie = saved_local_ref_cookie;
+ self->PopSirt();
+}
+
+extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
+ self->TransitionFromSuspendedToRunnable();
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked,
+ Thread* self) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
+ Thread* self) {
+ self->TransitionFromSuspendedToRunnable();
+ mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked, Thread* self) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ mirror::Object* o = self->DecodeJObject(result);
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
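+// Replace a jobject argument (an indirect reference, i.e. an Object**) with the raw Object*
+// it refers to, for badly-behaved apps that treat jobjects as direct object pointers.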
+static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
+ intptr_t value = *arg_ptr;
+ mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
+ mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
+ CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
+ << value_as_work_around_rep;
+ *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
+}
+
+extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(Thread::Current() == self);
+ // TODO: this code is specific to ARM
+  // On entry the stack pointed to by sp is:
+ // | arg3 | <- Calling JNI method's frame (and extra bit for out args)
+ // | LR |
+ // | R3 | arg2
+ // | R2 | arg1
+ // | R1 | jclass/jobject
+ // | R0 | JNIEnv
+ // | unused |
+ // | unused |
+ // | unused | <- sp
+ mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL);
+ DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
+ intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack
+ // Fix up this/jclass argument
+ WorkAroundJniBugsForJobject(arg_ptr);
+ arg_ptr++;
+ // Fix up jobject arguments
+ MethodHelper mh(jni_method);
+ int reg_num = 2; // Current register being processed, -1 for stack arguments.
+ for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
+ char shorty_char = mh.GetShorty()[i];
+ if (shorty_char == 'L') {
+ WorkAroundJniBugsForJobject(arg_ptr);
+ }
+ if (shorty_char == 'J' || shorty_char == 'D') {
+ if (reg_num == 2) {
+ arg_ptr = sp + 8; // skip to out arguments
+ reg_num = -1;
+ } else if (reg_num == 3) {
+ arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned
+ reg_num = -1;
+ } else {
+ DCHECK_EQ(reg_num, -1);
+ if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
+ arg_ptr += 3; // unaligned, pad and move through stack arguments
+ } else {
+ arg_ptr += 2; // aligned, move through stack arguments
+ }
+ }
+ } else {
+ if (reg_num == 2) {
+ arg_ptr++; // move through register arguments
+ reg_num++;
+ } else if (reg_num == 3) {
+ arg_ptr = sp + 8; // skip to outgoing stack arguments
+ reg_num = -1;
+ } else {
+ DCHECK_EQ(reg_num, -1);
+ arg_ptr++; // move through stack arguments
+ }
+ }
+ }
+  // Load the expected destination; see Method::RegisterNative.
+ const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
+ if (UNLIKELY(code == NULL)) {
+ code = GetJniDlsymLookupStub();
+ jni_method->RegisterNative(self, code);
+ }
+ return code;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_locks.cc b/runtime/oat/runtime/support_locks.cc
new file mode 100644
index 0000000..79bb7a6
--- /dev/null
+++ b/runtime/oat/runtime/support_locks.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self,
+ mirror::AbstractMethod** sp)
+ UNLOCK_FUNCTION(monitor_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+ DCHECK(obj != NULL); // Assumed to have been checked before entry
+  // MonitorExit may throw an exception.
+ return obj->MonitorExit(self) ? 0 /* Success */ : -1 /* Failure */;
+}
+
+extern "C" void artLockObjectFromCode(mirror::Object* obj, Thread* thread,
+ mirror::AbstractMethod** sp)
+ EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
+ FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
+ DCHECK(obj != NULL); // Assumed to have been checked before entry
+ obj->MonitorEnter(thread); // May block
+ DCHECK(thread->HoldsLock(obj));
+  // The only possible exception is an NPE, which is handled before entry.
+ DCHECK(!thread->IsExceptionPending());
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_math.cc b/runtime/oat/runtime/support_math.cc
new file mode 100644
index 0000000..0bfe59d
--- /dev/null
+++ b/runtime/oat/runtime/support_math.cc
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+namespace art {
+
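+// The cmpl/cmpg pairs below differ only in how they order NaN, matching the dex
+// cmpl-float/cmpg-float and cmpl-double/cmpg-double semantics: when either operand is NaN,
+// the 'l' variants fall through to return -1 and the 'g' variants fall through to return 1.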
+int CmplFloat(float a, float b) {
+ if (a == b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ } else if (a > b) {
+ return 1;
+ }
+ return -1;
+}
+
+int CmpgFloat(float a, float b) {
+ if (a == b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ } else if (a > b) {
+ return 1;
+ }
+ return 1;
+}
+
+int CmpgDouble(double a, double b) {
+ if (a == b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ } else if (a > b) {
+ return 1;
+ }
+ return 1;
+}
+
+int CmplDouble(double a, double b) {
+ if (a == b) {
+ return 0;
+ } else if (a < b) {
+ return -1;
+ } else if (a > b) {
+ return 1;
+ }
+ return -1;
+}
+
+extern "C" int64_t artLmulFromCode(int64_t a, int64_t b) {
+ return a * b;
+}
+
+extern "C" int64_t artLdivFromCode(int64_t a, int64_t b) {
+ return a / b;
+}
+
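+// Note: despite its name, this returns only the remainder (a % b).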
+extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b) {
+ return a % b;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_proxy.cc b/runtime/oat/runtime/support_proxy.cc
new file mode 100644
index 0000000..d4d0ca1
--- /dev/null
+++ b/runtime/oat/runtime/support_proxy.cc
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "argument_visitor.h"
+#include "dex_file-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "reflection.h"
+#include "runtime_support.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+#include "ScopedLocalRef.h"
+
+namespace art {
+
+// Visits arguments on the stack, placing them into the args vector; Object* arguments are
+// converted to jobjects.
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
+ public:
+ BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
+ PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+ val.l = soa_.AddLocalReference<jobject>(obj);
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_.push_back(val);
+ }
+
+ private:
+ ScopedObjectAccessUnchecked& soa_;
+ std::vector<jvalue>& args_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
+};
+
+// Visits arguments on the stack, placing them into the args vector; Object* arguments are
+// converted to jobjects.
+class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
+ public:
+ BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
+ QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+ val.l = soa_.AddLocalReference<jobject>(obj);
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ if (IsSplitLongOrDouble()) {
+ val.j = ReadSplitLongParam();
+ } else {
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ }
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_.push_back(val);
+ }
+
+ private:
+ ScopedObjectAccessUnchecked& soa_;
+ std::vector<jvalue>& args_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method,
+// which is responsible for recording the callee save registers. We explicitly place the incoming
+// reference arguments into jobjects (so they survive GC). We then invoke the invocation handler, a
+// field within the proxy object, which boxes the primitive arguments and deals with error cases.
+extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+ mirror::Object* receiver,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+ const char* old_cause =
+ self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+ self->VerifyStack();
+ // Start new JNI local reference state.
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ // Create local ref. copies of proxy method and the receiver.
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+  // Place the arguments into the args vector and remove the receiver.
+ MethodHelper proxy_mh(proxy_method);
+ std::vector<jvalue> args;
+ BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
+ local_ref_visitor.VisitArguments();
+ args.erase(args.begin());
+
+ // Convert proxy method into expected interface method.
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ DCHECK(interface_method != NULL);
+ DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
+  // that performs allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+ rcvr_jobj, interface_method_jobj, args);
+ return result.GetJ();
+}
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method,
+// which is responsible for recording the callee save registers. We explicitly place the incoming
+// reference arguments into jobjects (so they survive GC). We then invoke the invocation handler, a
+// field within the proxy object, which boxes the primitive arguments and deals with error cases.
+extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+ mirror::Object* receiver,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+ const char* old_cause =
+ self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+ // Register the top of the managed stack, making stack crawlable.
+ DCHECK_EQ(*sp, proxy_method);
+ self->SetTopOfStack(sp, 0);
+ DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ self->VerifyStack();
+ // Start new JNI local reference state.
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ // Create local ref. copies of proxy method and the receiver.
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+  // Place the arguments into the args vector and remove the receiver.
+ MethodHelper proxy_mh(proxy_method);
+ std::vector<jvalue> args;
+ BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
+ local_ref_visitor.VisitArguments();
+ args.erase(args.begin());
+
+ // Convert proxy method into expected interface method.
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ DCHECK(interface_method != NULL);
+ DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
+  // that performs allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+ rcvr_jobj, interface_method_jobj, args);
+ return result.GetJ();
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_stubs.cc b/runtime/oat/runtime/support_stubs.cc
new file mode 100644
index 0000000..096cb9c
--- /dev/null
+++ b/runtime/oat/runtime/support_stubs.cc
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "class_linker-inl.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "scoped_thread_state_change.h"
+
+// Architecture-specific assembly helper to deliver an exception.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+
+namespace art {
+
+// Lazily resolve a method for portable. Called by stub code.
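+// The called method is either the runtime resolution method, in which case the real target is
+// recovered from the invoke instruction at the caller's dex pc, or a static method whose
+// declaring class has not yet been initialized.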
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** called_addr,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t dex_pc;
+ mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc);
+
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ InvokeType invoke_type;
+ bool is_range;
+ if (called->IsRuntimeMethod()) {
+ const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ switch (instr_code) {
+ case Instruction::INVOKE_DIRECT:
+ invoke_type = kDirect;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_STATIC:
+ invoke_type = kStatic;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ invoke_type = kSuper;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ invoke_type = kVirtual;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+        // Avoid "used uninitialized" warnings.
+ invoke_type = kDirect;
+ is_range = true;
+ }
+ uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+ called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
+ } else {
+ CHECK(called->IsStatic()) << PrettyMethod(called);
+ invoke_type = kStatic;
+ }
+ const void* code = NULL;
+ if (LIKELY(!thread->IsExceptionPending())) {
+ // Incompatible class change should have been handled in resolve method.
+ CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+ // Ensure that the called method's class is initialized.
+ mirror::Class* called_class = called->GetDeclaringClass();
+ linker->EnsureInitialized(called_class, true, true);
+ if (LIKELY(called_class->IsInitialized())) {
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+      {  // For lazy linking.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ } else if (called_class->IsInitializing()) {
+ if (invoke_type == kStatic) {
+ // Class is still initializing, go to oat and grab code (trampoline must be left in place
+ // until class is initialized to stop races between threads).
+ code = linker->GetOatCodeFor(called);
+ } else {
+ // No trampoline for non-static methods.
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+        {  // For lazy linking.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ }
+ } else {
+ DCHECK(called_class->IsErroneous());
+ }
+ }
+ if (LIKELY(code != NULL)) {
+ // Expect class to at least be initializing.
+ DCHECK(called->GetDeclaringClass()->IsInitializing());
+ // Don't want infinite recursion.
+ DCHECK(code != GetResolutionTrampoline(linker));
+ // Set up entry into main method
+ *called_addr = called;
+ }
+ return code;
+}
+
+// Lazily resolve a method for quick. Called by stub code.
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__arm__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | LR |
+ // | ... | callee saves
+ // | R3 | arg3
+ // | R2 | arg2
+ // | R1 | arg1
+ // | R0 |
+ // | Method* | <- sp
+ DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 48);
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
+ uint32_t pc_offset = 10;
+ uintptr_t caller_pc = regs[pc_offset];
+#elif defined(__i386__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | Return |
+ // | EBP,ESI,EDI | callee saves
+ // | EBX | arg3
+ // | EDX | arg2
+ // | ECX | arg1
+ // | EAX/Method* | <- sp
+ DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 32);
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uintptr_t caller_pc = regs[7];
+#elif defined(__mips__)
+  // On entry the stack pointed to by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | RA |
+ // | ... | callee saves
+ // | A3 | arg3
+ // | A2 | arg2
+ // | A1 | arg1
+ // | A0/Method* | <- sp
+ DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 64);
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uint32_t pc_offset = 15;
+ uintptr_t caller_pc = regs[pc_offset];
+#else
+ UNIMPLEMENTED(FATAL);
+ mirror::AbstractMethod** caller_sp = NULL;
+ uintptr_t* regs = NULL;
+ uintptr_t caller_pc = 0;
+#endif
+ FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
+ // Start new JNI local reference state
+ JNIEnvExt* env = thread->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+
+ // Compute details about the called method (avoid GCs)
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ mirror::AbstractMethod* caller = *caller_sp;
+ InvokeType invoke_type;
+ uint32_t dex_method_idx;
+#if !defined(__i386__)
+ const char* shorty;
+ uint32_t shorty_len;
+#endif
+ if (called->IsRuntimeMethod()) {
+ uint32_t dex_pc = caller->ToDexPc(caller_pc);
+ const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ bool is_range;
+ switch (instr_code) {
+ case Instruction::INVOKE_DIRECT:
+ invoke_type = kDirect;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_STATIC:
+ invoke_type = kStatic;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ invoke_type = kSuper;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ invoke_type = kVirtual;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+        // Avoid "used uninitialized" warnings.
+ invoke_type = kDirect;
+ is_range = false;
+ }
+ dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+#if !defined(__i386__)
+ shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len);
+#endif
+ } else {
+ invoke_type = kStatic;
+ dex_method_idx = called->GetDexMethodIndex();
+#if !defined(__i386__)
+ MethodHelper mh(called);
+ shorty = mh.GetShorty();
+ shorty_len = mh.GetShortyLength();
+#endif
+ }
+#if !defined(__i386__)
+ // Discover shorty (avoid GCs)
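+  // Count how many of the argument words are passed in registers: at most 3, since R0/A0
+  // holds the Method*.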
+ size_t args_in_regs = 0;
+ for (size_t i = 1; i < shorty_len; i++) {
+ char c = shorty[i];
+ args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+  // Place incoming reference arguments from the caller's registers into local references.
+ size_t cur_arg = 1; // skip method_idx in R0, first arg is in R1
+ if (invoke_type != kStatic) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
+ cur_arg++;
+ if (args_in_regs < 3) {
+ // If we thought we had fewer than 3 arguments in registers, account for the receiver
+ args_in_regs++;
+ }
+ soa.AddLocalReference<jobject>(obj);
+ }
+ size_t shorty_index = 1; // skip return value
+  // Iterate over the shorty while the arguments are still in registers (cur_arg is offset
+  // by 1 because it skips the Method* in R0).
+ while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) {
+ char c = shorty[shorty_index];
+ shorty_index++;
+ if (c == 'L') {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
+ soa.AddLocalReference<jobject>(obj);
+ }
+ cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
+ }
+  // Place incoming reference arguments from the caller's stack into local references.
+ cur_arg += pc_offset + 1; // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves
+ while (shorty_index < shorty_len) {
+ char c = shorty[shorty_index];
+ shorty_index++;
+ if (c == 'L') {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
+ soa.AddLocalReference<jobject>(obj);
+ }
+ cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
+ }
+#endif
+  // Resolve the method, filling in the dex cache.
+ if (called->IsRuntimeMethod()) {
+ called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ }
+ const void* code = NULL;
+ if (LIKELY(!thread->IsExceptionPending())) {
+ // Incompatible class change should have been handled in resolve method.
+ CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
+ // Ensure that the called method's class is initialized.
+ mirror::Class* called_class = called->GetDeclaringClass();
+ linker->EnsureInitialized(called_class, true, true);
+ if (LIKELY(called_class->IsInitialized())) {
+ code = called->GetEntryPointFromCompiledCode();
+ } else if (called_class->IsInitializing()) {
+ if (invoke_type == kStatic) {
+ // Class is still initializing, go to oat and grab code (trampoline must be left in place
+ // until class is initialized to stop races between threads).
+ code = linker->GetOatCodeFor(called);
+ } else {
+ // No trampoline for non-static methods.
+ code = called->GetEntryPointFromCompiledCode();
+ }
+ } else {
+ DCHECK(called_class->IsErroneous());
+ }
+ }
+ if (UNLIKELY(code == NULL)) {
+    // Something went wrong in ResolveMethod or EnsureInitialized; go into exception delivery
+    // with the pending exception in r0.
+ CHECK(thread->IsExceptionPending());
+ code = reinterpret_cast<void*>(art_quick_deliver_exception_from_code);
+ regs[0] = reinterpret_cast<uintptr_t>(thread->GetException(NULL));
+ thread->ClearException();
+ } else {
+ // Expect class to at least be initializing.
+ DCHECK(called->GetDeclaringClass()->IsInitializing());
+ // Don't want infinite recursion.
+ DCHECK(code != GetResolutionTrampoline(linker));
+ // Set up entry into main method
+ regs[0] = reinterpret_cast<uintptr_t>(called);
+ }
+ return code;
+}
+
+// Called by the abstract method error stub.
+extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+#else
+ UNUSED(sp);
+#endif
+ ThrowAbstractMethodError(method);
+ self->QuickDeliverException();
+}
+
+// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+extern "C" void* artFindNativeMethod(Thread* self) {
+ Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
+ DCHECK(Thread::Current() == self);
+ ScopedObjectAccess soa(self);
+
+ mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
+ DCHECK(method != NULL);
+
+  // Look up the symbol address for the method; on failure we return NULL with an
+  // exception set, otherwise we return the address of the native code we found.
+ void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
+ if (native_code == NULL) {
+ DCHECK(self->IsExceptionPending());
+ return NULL;
+ } else {
+    // Register the native code so that future calls don't come back here.
+ method->RegisterNative(self, native_code);
+ return native_code;
+ }
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_thread.cc b/runtime/oat/runtime/support_thread.cc
new file mode 100644
index 0000000..e711714
--- /dev/null
+++ b/runtime/oat/runtime/support_thread.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+#include "thread.h"
+#include "thread_list.h"
+
+namespace art {
+
+void CheckSuspendFromCode(Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Called when thread->suspend_count_ != 0 on JNI return. The JNI method acts as the callee-save frame.
+ thread->VerifyStack();
+ CheckSuspend(thread);
+}
+
+extern "C" void artTestSuspendFromCode(Thread* thread, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Called when the suspend count check value is 0 and thread->suspend_count_ != 0.
+ FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
+ CheckSuspend(thread);
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_throw.cc b/runtime/oat/runtime/support_throw.cc
new file mode 100644
index 0000000..9588698
--- /dev/null
+++ b/runtime/oat/runtime/support_throw.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "mirror/object.h"
+#include "object_utils.h"
+#include "runtime_support.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+// Deliver an exception that is pending on the thread, setting up a callee-save frame on the way.
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+ thread->QuickDeliverException();
+}
+
+// Called by generated code to throw an exception.
+extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  /*
+   * exception may be NULL, in which case this routine should
+   * throw an NPE. NOTE: this is a convenience for generated code,
+   * which previously did the null check inline and constructed
+   * and threw an NPE if NULL. This routine is responsible for setting
+   * exception_ in the thread and delivering the exception.
+   */
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ if (exception == NULL) {
+ self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;",
+ "throw with null exception");
+ } else {
+ self->SetException(throw_location, exception);
+ }
+ self->QuickDeliverException();
+}
+
+// Called by generated code to throw a NullPointerException.
+extern "C" void artThrowNullPointerExceptionFromCode(Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionFromDexPC(throw_location);
+ self->QuickDeliverException();
+}
+
+// Called by generated code to throw an ArithmeticException for divide-by-zero.
+extern "C" void artThrowDivZeroFromCode(Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowArithmeticExceptionDivideByZero();
+ self->QuickDeliverException();
+}
+
+// Called by generated code to throw an ArrayIndexOutOfBoundsException.
+extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowArrayIndexOutOfBoundsException(index, length);
+ self->QuickDeliverException();
+}
+
+extern "C" void artThrowStackOverflowFromCode(Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowStackOverflowError(self);
+ self->QuickDeliverException();
+}
+
+extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self,
+ mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+ ThrowNoSuchMethodError(method_idx);
+ self->QuickDeliverException();
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/x86/context_x86.cc b/runtime/oat/runtime/x86/context_x86.cc
new file mode 100644
index 0000000..ceb10bd
--- /dev/null
+++ b/runtime/oat/runtime/x86/context_x86.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_x86.h"
+
+#include "mirror/abstract_method.h"
+#include "mirror/object-inl.h"
+#include "stack.h"
+
+namespace art {
+namespace x86 {
+
+static const uint32_t gZero = 0;
+
+void X86Context::Reset() {
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
+ gprs_[i] = NULL;
+ }
+ gprs_[ESP] = &esp_;
+ // Initialize registers with easy to spot debug values.
+ esp_ = X86Context::kBadGprBase + ESP;
+ eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
+}
+
+void X86Context::FillCalleeSaves(const StackVisitor& fr) {
+ mirror::AbstractMethod* method = fr.GetMethod();
+ uint32_t core_spills = method->GetCoreSpillMask();
+ size_t spill_count = __builtin_popcount(core_spills);
+ DCHECK_EQ(method->GetFpSpillMask(), 0u);
+ size_t frame_size = method->GetFrameSizeInBytes();
+ if (spill_count > 0) {
+    // The lowest-numbered spill is farthest away; walk the registers and fill them into the
+    // context.
+ int j = 2; // Offset j to skip return address spill.
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
+ if (((core_spills >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+}
+
+void X86Context::SmashCallerSaves() {
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[EAX] = const_cast<uint32_t*>(&gZero);
+ gprs_[EDX] = const_cast<uint32_t*>(&gZero);
+ gprs_[ECX] = NULL;
+ gprs_[EBX] = NULL;
+}
+
+void X86Context::SetGPR(uint32_t reg, uintptr_t value) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
+ CHECK_NE(gprs_[reg], &gZero);
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
+void X86Context::DoLongJump() {
+#if defined(__i386__)
+ // Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
+ // the top for the stack pointer that doesn't get popped in a pop-all.
+ volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
+ for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
+ gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != NULL ? *gprs_[i] : X86Context::kBadGprBase + i;
+ }
+ // We want to load the stack pointer one slot below so that the ret will pop eip.
+ uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - kWordSize;
+ gprs[kNumberOfCpuRegisters] = esp;
+ *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
+ __asm__ __volatile__(
+ "movl %0, %%esp\n\t" // ESP points to gprs.
+ "popal\n\t" // Load all registers except ESP and EIP with values in gprs.
+ "popl %%esp\n\t" // Load stack pointer.
+ "ret\n\t" // From higher in the stack pop eip.
+ : // output.
+ : "g"(&gprs[0]) // input.
+ :); // clobber.
+#else
+ UNIMPLEMENTED(FATAL);
+#endif
+}
+
+} // namespace x86
+} // namespace art
diff --git a/runtime/oat/runtime/x86/context_x86.h b/runtime/oat/runtime/x86/context_x86.h
new file mode 100644
index 0000000..7928fd8
--- /dev/null
+++ b/runtime/oat/runtime/x86/context_x86.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
+#define ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
+
+#include "constants_x86.h"
+#include "oat/runtime/context.h"
+
+namespace art {
+namespace x86 {
+
+class X86Context : public Context {
+ public:
+ X86Context() {
+ Reset();
+ }
+ virtual ~X86Context() {}
+
+ virtual void Reset();
+
+ virtual void FillCalleeSaves(const StackVisitor& fr);
+
+ virtual void SetSP(uintptr_t new_sp) {
+ SetGPR(ESP, new_sp);
+ }
+
+ virtual void SetPC(uintptr_t new_pc) {
+ eip_ = new_pc;
+ }
+
+ virtual uintptr_t GetGPR(uint32_t reg) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
+ return *gprs_[reg];
+ }
+
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
+
+ virtual void SmashCallerSaves();
+ virtual void DoLongJump();
+
+ private:
+ // Pointers to register locations; floating point registers are all caller save. Values are
+ // initialized to NULL or to the special register slots below.
+ uintptr_t* gprs_[kNumberOfCpuRegisters];
+ // Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
+ // special in that it cannot be encoded normally as a register operand to an instruction (except
+ // in 64-bit addressing modes).
+ uintptr_t esp_, eip_;
+};
+} // namespace x86
+} // namespace art
+
+#endif // ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
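The double indirection in the header above (gprs_ holds pointers to values, not the values themselves) lets a register write land either in a frame slot located by FillCalleeSaves or in the context's own esp_/eip_ storage. A toy model of that pattern, with invented names; only the shape is taken from the class above:

    // Toy model of the Context register-pointer design; names are stand-ins.
    #include <cstdint>
    #include <cstdio>

    struct MiniContext {
      uintptr_t fallback_sp = 0;                        // Plays the role of esp_.
      uintptr_t* slots[2] = { nullptr, &fallback_sp };  // [0] = some GPR, [1] = SP
      uintptr_t pc = 0;                                 // Plays the role of eip_.

      void SetSP(uintptr_t v) { *slots[1] = v; }        // Writes the fallback slot.
      void SetPC(uintptr_t v) { pc = v; }
    };

    int main() {
      uintptr_t stack_slot = 0;        // Pretend FillCalleeSaves found a spill here.
      MiniContext context;
      context.slots[0] = &stack_slot;
      *context.slots[0] = 0x1234;      // A SetGPR-style write goes through to the frame.
      context.SetSP(0xfff0);           // Lands in fallback_sp, as esp_ does above.
      context.SetPC(0xdeadc0de);
      printf("%lx %lx %lx\n", (unsigned long)stack_slot,
             (unsigned long)context.fallback_sp, (unsigned long)context.pc);
      return 0;
    }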
diff --git a/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc b/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc
new file mode 100644
index 0000000..a90a583
--- /dev/null
+++ b/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "runtime_support.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern "C" double art_quick_fmod_from_code(double, double);
+extern "C" float art_quick_fmodf_from_code(float, float);
+extern "C" double art_quick_l2d_from_code(int64_t);
+extern "C" float art_quick_l2f_from_code(int64_t);
+extern "C" int64_t art_quick_d2l_from_code(double);
+extern "C" int64_t art_quick_f2l_from_code(float);
+extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t);
+extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t);
+extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+extern "C" void* art_quick_memcpy(void*, const void*, size_t);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(EntryPoints* points) {
+ // Alloc
+ points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
+ points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ points->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ points->pSet32Instance = art_quick_set32_instance_from_code;
+ points->pSet32Static = art_quick_set32_static_from_code;
+ points->pSet64Instance = art_quick_set64_instance_from_code;
+ points->pSet64Static = art_quick_set64_static_from_code;
+ points->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ points->pSetObjStatic = art_quick_set_obj_static_from_code;
+ points->pGet32Instance = art_quick_get32_instance_from_code;
+ points->pGet64Instance = art_quick_get64_instance_from_code;
+ points->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ points->pGet32Static = art_quick_get32_static_from_code;
+ points->pGet64Static = art_quick_get64_static_from_code;
+ points->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ points->pJniMethodStart = JniMethodStart;
+ points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ points->pJniMethodEnd = JniMethodEnd;
+ points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ points->pJniMethodEndWithReference = JniMethodEndWithReference;
+ points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ points->pLockObjectFromCode = art_quick_lock_object_from_code;
+ points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ //points->pCmpgDouble = NULL; // Not needed on x86.
+ //points->pCmpgFloat = NULL; // Not needed on x86.
+ //points->pCmplDouble = NULL; // Not needed on x86.
+ //points->pCmplFloat = NULL; // Not needed on x86.
+ points->pFmod = art_quick_fmod_from_code;
+ points->pL2d = art_quick_l2d_from_code;
+ points->pFmodf = art_quick_fmodf_from_code;
+ points->pL2f = art_quick_l2f_from_code;
+ //points->pD2iz = NULL; // Not needed on x86.
+ //points->pF2iz = NULL; // Not needed on x86.
+ points->pIdivmod = art_quick_idivmod_from_code;
+ points->pD2l = art_quick_d2l_from_code;
+ points->pF2l = art_quick_f2l_from_code;
+ points->pLdiv = art_quick_ldiv_from_code;
+ points->pLdivmod = art_quick_ldivmod_from_code;
+ points->pLmul = art_quick_lmul_from_code;
+ points->pShlLong = art_quick_lshl_from_code;
+ points->pShrLong = art_quick_lshr_from_code;
+ points->pUshrLong = art_quick_lushr_from_code;
+
+ // Interpreter
+ points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ points->pIndexOf = art_quick_indexof;
+ points->pMemcmp16 = art_quick_memcmp16;
+ points->pStringCompareTo = art_quick_string_compareto;
+ points->pMemcpy = art_quick_memcpy;
+
+ // Invocation
+ points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+ points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ points->pCheckSuspendFromCode = CheckSuspendFromCode;
+ points->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ points->pDeliverException = art_quick_deliver_exception_from_code;
+ points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+}
+
+} // namespace art
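InitEntryPoints populates a table of function pointers that compiled code reaches through fixed offsets from the Thread*; each per-architecture file decides which slots point at assembly stubs and which slots (the commented-out compare/convert entries) the compiler inlines instead. A reduced sketch of the table pattern, with stand-in names rather than the real EntryPoints layout:

    // Reduced sketch of the entrypoint-table pattern; the struct is a stand-in.
    #include <cstdint>
    #include <cstdio>

    struct MiniEntryPoints {
      int32_t (*pIdivmod)(int32_t, int32_t);
      uint64_t (*pShlLong)(uint64_t, uint32_t);
    };

    static int32_t Idivmod(int32_t a, int32_t b) { return a / b; }  // quotient only here
    static uint64_t ShlLong(uint64_t v, uint32_t shift) { return v << (shift & 63); }

    static void InitMiniEntryPoints(MiniEntryPoints* points) {
      points->pIdivmod = Idivmod;
      points->pShlLong = ShlLong;
    }

    int main() {
      MiniEntryPoints points;
      InitMiniEntryPoints(&points);
      // Compiled code would reach these through a fixed offset from the Thread*.
      printf("%d %llu\n", points.pIdivmod(7, 2),
             (unsigned long long)points.pShlLong(1, 8));
      return 0;
    }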
diff --git a/runtime/oat/runtime/x86/runtime_support_x86.S b/runtime/oat/runtime/x86/runtime_support_x86.S
new file mode 100644
index 0000000..ee6db0c
--- /dev/null
+++ b/runtime/oat/runtime/x86/runtime_support_x86.S
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support.h"
+
+#if defined(__APPLE__)
+ // Mac OS' as(1) doesn't let you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name
+ #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name
+ #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name
+ #define END_MACRO .endmacro
+
+ // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
+ // are mangled with an extra underscore prefix. The use of $x for arguments
+ // means that literals need to be represented as $$x in macros.
+ #define SYMBOL(name) _ ## name
+ #define VAR(name,index) SYMBOL($index)
+ #define REG_VAR(name,index) %$index
+ #define CALL_MACRO(name,index) $index
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $$value
+#else
+ // Regular gas(1) lets you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+ #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+ #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+ #define END_MACRO .endm
+
+ // Regular gas(1) uses \argument_name for macro arguments.
+ // We need to turn on alternate macro syntax so we can use &; otherwise the preprocessor
+ // will screw us by inserting a space between the \ and the name. Even in this mode there's
+ // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
+ // special character, so care needs to be taken when passing registers as macro arguments.
+ .altmacro
+ #define SYMBOL(name) name
+ #define VAR(name,index) name&
+ #define REG_VAR(name,index) %name
+ #define CALL_MACRO(name,index) name&
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $value
+#endif
+
+ /* Cache alignment for function entry */
+MACRO0(ALIGN_FUNCTION_ENTRY)
+ .balign 16
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION, c_name)
+ .type VAR(c_name, 0), @function
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ .cfi_startproc
+END_MACRO
+
+MACRO1(END_FUNCTION, c_name)
+ .cfi_endproc
+ .size \c_name, .-\c_name
+END_MACRO
+
+MACRO1(PUSH, reg)
+ pushl REG_VAR(reg, 0)
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset REG_VAR(reg, 0), 0
+END_MACRO
+
+MACRO1(POP, reg)
+ popl REG_VAR(reg,0)
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore REG_VAR(reg,0)
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ */
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+ PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
+ PUSH esi
+ PUSH ebp
+ subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
+ .cfi_adjust_cfa_offset 16
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly)
+ */
+MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+ PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
+ PUSH esi
+ PUSH ebp
+ subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
+ .cfi_adjust_cfa_offset 16
+END_MACRO
+
+MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
+ addl MACRO_LITERAL(28), %esp // Unwind stack up to return address
+ .cfi_adjust_cfa_offset -28
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ */
+MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+ PUSH edi // Save callee saves
+ PUSH esi
+ PUSH ebp
+ PUSH ebx // Save args
+ PUSH edx
+ PUSH ecx
+ PUSH eax // Align stack, eax will be clobbered by Method*
+END_MACRO
+
+MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+ addl MACRO_LITERAL(4), %esp // Remove padding
+ .cfi_adjust_cfa_offset -4
+ POP ecx // Restore args except eax
+ POP edx
+ POP ebx
+ POP ebp // Restore callee saves
+ POP esi
+ POP edi
+END_MACRO
+
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save callee saves for throw
+ mov %esp, %ecx
+ // Outgoing argument set up
+ subl MACRO_LITERAL(8), %esp // Alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ int3 // unreached
+END_MACRO
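Each stub locates the current thread by reading %fs:THREAD_SELF_OFFSET, a field at a fixed offset from the thread's segment-register base, and DELIVER_PENDING_EXCEPTION hands that thread to artDeliverPendingExceptionFromCode. As an analogy only (the stubs use the raw segment read, not the C++ runtime), thread_local gives the same per-thread slot shape; the field name below is a stand-in:

    // Analogy for the %fs:THREAD_SELF_OFFSET access; names are stand-ins.
    #include <cstdio>

    struct FakeThread {
      void* exception;  // Stand-in for the exception_ slot the stubs test.
    };

    thread_local FakeThread tls_thread = { nullptr };

    static FakeThread* CurrentThread() { return &tls_thread; }

    int main() {
      if (CurrentThread()->exception != nullptr) {
        // A real stub would branch into DELIVER_PENDING_EXCEPTION here.
        printf("pending exception\n");
      } else {
        printf("no pending exception\n");
      }
      return 0;
    }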
+
+MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx
+ // Outgoing argument set up
+ subl MACRO_LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ int3 // unreached
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx
+ // Outgoing argument set up
+ PUSH eax // alignment padding
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ int3 // unreached
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %edx
+ // Outgoing argument set up
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ int3 // unreached
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArithmeticException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+
+ /*
+ * Called by managed code, saves callee saves and then calls artThrowException
+ * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
+ * index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
+ *
+ * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
+ * of the target Method* in r0 and method->code_ in r1.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread, and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
+ * pointing back to the original caller.
+ */
+MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ // return address
+ PUSH edi
+ PUSH esi
+ PUSH ebp
+ PUSH ebx
+ PUSH edx
+ PUSH ecx
+ PUSH eax // <-- callee save Method* to go here
+ movl %esp, %edx // remember SP
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ pushl 32(%edx) // pass caller Method*
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ movl %edx, %edi // save code pointer in EDI
+ addl MACRO_LITERAL(36), %esp // Pop arguments, skipping eax
+ .cfi_adjust_cfa_offset -36
+ POP ecx // Restore args
+ POP edx
+ POP ebx
+ POP ebp // Restore callee saves.
+ POP esi
+ // Swap EDI callee save with code pointer.
+ xchgl %edi, (%esp)
+ testl %eax, %eax // Branch forward if exception pending.
+ jz 1f
+ // Tail call to intended method.
+ ret
+1:
+ addl MACRO_LITERAL(4), %esp // Pop code pointer off stack
+ .cfi_adjust_cfa_offset -4
+ DELIVER_PENDING_EXCEPTION
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
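The 64-bit Method*/code result described above maps directly onto the ia32 calling convention, which returns 64-bit integers in edx:eax, so a C++ helper can hand both words back in one return value. A sketch with 32-bit stand-ins for the real pointers:

    // Packing a method pointer (low word, eax) and code pointer (high word, edx)
    // into one 64-bit return; the uint32_t values stand in for the real pointers.
    #include <cstdint>
    #include <cstdio>

    uint64_t PackMethodAndCode(uint32_t method, uint32_t code) {
      return static_cast<uint64_t>(method) | (static_cast<uint64_t>(code) << 32);
    }

    int main() {
      uint64_t packed = PackMethodAndCode(0x1000, 0x2000);
      printf("method=%x code=%x\n", (uint32_t)packed, (uint32_t)(packed >> 32));
      return 0;
    }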
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * [sp] = return address
+ * [sp + 4] = method pointer
+ * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 12] = size of argument array in bytes
+ * [sp + 16] = (managed) thread pointer
+ * [sp + 20] = JValue* result
+ * [sp + 24] = result type char
+ */
+DEFINE_FUNCTION art_portable_invoke_stub
+ PUSH ebp // save ebp
+ PUSH ebx // save ebx
+ mov %esp, %ebp // copy value of stack pointer into base pointer
+ .cfi_def_cfa_register ebp
+ mov 20(%ebp), %ebx // get arg array size
+ addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
+ andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
+ subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ subl %ebx, %esp // reserve stack space for argument array
+ lea 4(%esp), %eax // compute memcpy dest: stack pointer plus 4 to skip the method* slot
+ pushl 20(%ebp) // push size of region to memcpy
+ pushl 16(%ebp) // push arg array as source of memcpy
+ pushl %eax // push stack pointer as destination of memcpy
+ call SYMBOL(memcpy) // (void*, const void*, size_t)
+ addl LITERAL(12), %esp // pop arguments to memcpy
+ mov 12(%ebp), %eax // move method pointer into eax
+ mov %eax, (%esp) // push method pointer onto stack
+ call *METHOD_CODE_OFFSET(%eax) // call the method
+ mov %ebp, %esp // restore stack pointer
+ POP ebx // pop ebx
+ POP ebp // pop ebp
+ mov 20(%esp), %ecx // get result pointer
+ cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
+ je return_double_portable
+ cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
+ je return_float_portable
+ mov %eax, (%ecx) // store the result
+ mov %edx, 4(%ecx) // store the other half of the result
+ ret
+return_double_portable:
+ fstpl (%ecx) // store the floating point result as double
+ ret
+return_float_portable:
+ fstps (%ecx) // store the floating point result as float
+ ret
+END_FUNCTION art_portable_invoke_stub
+
+ /*
+ * Quick invocation stub.
+ * On entry:
+ * [sp] = return address
+ * [sp + 4] = method pointer
+ * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 12] = size of argument array in bytes
+ * [sp + 16] = (managed) thread pointer
+ * [sp + 20] = JValue* result
+ * [sp + 24] = result type char
+ */
+DEFINE_FUNCTION art_quick_invoke_stub
+ PUSH ebp // save ebp
+ PUSH ebx // save ebx
+ mov %esp, %ebp // copy value of stack pointer into base pointer
+ .cfi_def_cfa_register ebp
+ mov 20(%ebp), %ebx // get arg array size
+ addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
+ andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
+ subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ subl %ebx, %esp // reserve stack space for argument array
+ lea 4(%esp), %eax // compute memcpy dest: stack pointer plus 4 to skip the method* slot
+ pushl 20(%ebp) // push size of region to memcpy
+ pushl 16(%ebp) // push arg array as source of memcpy
+ pushl %eax // push stack pointer as destination of memcpy
+ call SYMBOL(memcpy) // (void*, const void*, size_t)
+ addl LITERAL(12), %esp // pop arguments to memcpy
+ movl LITERAL(0), (%esp) // store NULL for method*
+ mov 12(%ebp), %eax // move method pointer into eax
+ mov 4(%esp), %ecx // copy arg1 into ecx
+ mov 8(%esp), %edx // copy arg2 into edx
+ mov 12(%esp), %ebx // copy arg3 into ebx
+ call *METHOD_CODE_OFFSET(%eax) // call the method
+ mov %ebp, %esp // restore stack pointer
+ POP ebx // pop ebx
+ POP ebp // pop ebp
+ mov 20(%esp), %ecx // get result pointer
+ cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
+ je return_double_quick
+ cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
+ je return_float_quick
+ mov %eax, (%ecx) // store the result
+ mov %edx, 4(%ecx) // store the other half of the result
+ ret
+return_double_quick:
+return_float_quick:
+ movsd %xmm0, (%ecx) // store the floating point result
+ ret
+END_FUNCTION art_quick_invoke_stub
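The documented stack layout of both invoke stubs corresponds to an ordinary cdecl call, so the C++ side can declare them as plain extern "C" functions. A hedged interface sketch (Thread and JValue are stand-ins here; the extern resolves to the assembly above when linked into the runtime):

    // Hedged sketch of the C++ side of art_quick_invoke_stub; types are stand-ins.
    #include <cstdint>

    union JValue { int32_t i; int64_t j; float f; double d; };  // stand-in
    struct Thread;                                              // stand-in

    extern "C" void art_quick_invoke_stub(void* method, uint32_t* args, uint32_t args_size,
                                          Thread* self, JValue* result, char result_type);

    void InvokeIntMethod(void* method, Thread* self, int32_t receiver, int32_t arg) {
      // [sp+8] = arg array, [sp+12] = size in bytes, [sp+24] = result type char.
      uint32_t args[2] = { static_cast<uint32_t>(receiver), static_cast<uint32_t>(arg) };
      JValue result;
      art_quick_invoke_stub(method, args, sizeof(args), self, &result, 'I');
    }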
+
+MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ // Outgoing argument set up
+ subl MACRO_LITERAL(8), %esp // push padding
+ .cfi_adjust_cfa_offset 8
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ // Outgoing argument set up
+ PUSH eax // push padding
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ // Outgoing argument set up
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ addl MACRO_LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO0(RETURN_IF_EAX_NOT_ZERO)
+ testl %eax, %eax // eax == 0 ?
+ jz 1f // if eax == 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO0(RETURN_IF_EAX_ZERO)
+ testl %eax, %eax // eax == 0 ?
+ jnz 1f // if eax != 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
+ mov %fs:THREAD_EXCEPTION_OFFSET, %ebx // get exception field
+ testl %ebx, %ebx // ebx == 0 ?
+ jnz 1f // if ebx != 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
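The three return macros encode the runtime-call convention: allocation-style helpers return non-null on success, boolean-style helpers return 0 on success, and getters signal failure only through the thread's pending exception slot. A C++ caricature of the three checks, with hypothetical names standing in for the thread field and the delivery path:

    // Caricature of the three return conventions above; names are hypothetical.
    #include <cstdio>

    static void* thread_exception = nullptr;  // Stand-in for Thread::Current()->exception_.

    static void DeliverPendingException() {   // Stand-in for DELIVER_PENDING_EXCEPTION.
      printf("deliver pending exception\n");
    }

    // RETURN_IF_EAX_NOT_ZERO: allocation-style helpers return non-null on success.
    static void* ReturnIfNotZero(void* eax) {
      if (eax == nullptr) DeliverPendingException();
      return eax;
    }

    // RETURN_IF_EAX_ZERO: boolean-style helpers return 0 on success.
    static int ReturnIfZero(int eax) {
      if (eax != 0) DeliverPendingException();
      return eax;
    }

    // RETURN_OR_DELIVER_PENDING_EXCEPTION: getters signal failure via the thread only.
    static void ReturnOrDeliver() {
      if (thread_exception != nullptr) DeliverPendingException();
    }

    int main() {
      ReturnIfNotZero(nullptr);
      ReturnIfZero(1);
      thread_exception = &thread_exception;
      ReturnOrDeliver();
      return 0;
    }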
+
+TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+
+TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
+
+ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret
+ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO
+
+TWO_ARG_DOWNCALL art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
+
+DEFINE_FUNCTION art_quick_is_assignable_from_code
+ PUSH eax // alignment padding
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b, Thread*, SP)
+ addl LITERAL(12), %esp // pop arguments
+ .cfi_adjust_cfa_offset -12
+ ret
+END_FUNCTION art_quick_is_assignable_from_code
+
+DEFINE_FUNCTION art_quick_memcpy
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call SYMBOL(memcpy) // (void*, const void*, size_t)
+ addl LITERAL(12), %esp // pop arguments
+ .cfi_adjust_cfa_offset -12
+ ret
+END_FUNCTION art_quick_memcpy
+
+TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO
+
+NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+
+DEFINE_FUNCTION art_quick_fmod_from_code
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass arg4 b.hi
+ PUSH edx // pass arg3 b.lo
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(fmod) // (jdouble a, jdouble b)
+ fstpl (%esp) // pop return value off fp stack
+ movsd (%esp), %xmm0 // place into %xmm0
+ addl LITERAL(28), %esp // pop arguments
+ .cfi_adjust_cfa_offset -28
+ ret
+END_FUNCTION art_quick_fmod_from_code
+
+DEFINE_FUNCTION art_quick_fmodf_from_code
+ PUSH eax // alignment padding
+ PUSH ecx // pass arg2 b
+ PUSH eax // pass arg1 a
+ call SYMBOL(fmodf) // (jfloat a, jfloat b)
+ fstps (%esp) // pop return value off fp stack
+ movss (%esp), %xmm0 // place into %xmm0
+ addl LITERAL(12), %esp // pop arguments
+ .cfi_adjust_cfa_offset -12
+ ret
+END_FUNCTION art_quick_fmodf_from_code
+
+DEFINE_FUNCTION art_quick_l2d_from_code
+ PUSH ecx // push arg2 a.hi
+ PUSH eax // push arg1 a.lo
+ fildll (%esp) // load as integer and push into st0
+ fstpl (%esp) // pop value off fp stack as double
+ movsd (%esp), %xmm0 // place into %xmm0
+ addl LITERAL(8), %esp // pop arguments
+ .cfi_adjust_cfa_offset -8
+ ret
+END_FUNCTION art_quick_l2d_from_code
+
+DEFINE_FUNCTION art_quick_l2f_from_code
+ PUSH ecx // push arg2 a.hi
+ PUSH eax // push arg1 a.lo
+ fildll (%esp) // load as integer and push into st0
+ fstps (%esp) // pop value off fp stack as a single
+ movss (%esp), %xmm0 // place into %xmm0
+ addl LITERAL(8), %esp // pop argument
+ .cfi_adjust_cfa_offset -8
+ ret
+END_FUNCTION art_quick_l2f_from_code
+
+DEFINE_FUNCTION art_quick_d2l_from_code
+ PUSH eax // alignment padding
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(art_d2l) // (jdouble a)
+ addl LITERAL(12), %esp // pop arguments
+ .cfi_adjust_cfa_offset -12
+ ret
+END_FUNCTION art_quick_d2l_from_code
+
+DEFINE_FUNCTION art_quick_f2l_from_code
+ subl LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH eax // pass arg1 a
+ call SYMBOL(art_f2l) // (jfloat a)
+ addl LITERAL(12), %esp // pop arguments
+ .cfi_adjust_cfa_offset -12
+ ret
+END_FUNCTION art_quick_f2l_from_code
+
+DEFINE_FUNCTION art_quick_idivmod_from_code
+ cmpl LITERAL(0x80000000), %eax
+ je check_arg2 // special case
+args_ok:
+ cdq // edx:eax = sign extend eax
+ idiv %ecx // (edx,eax) = (edx:eax % ecx, edx:eax / ecx)
+ ret
+check_arg2:
+ cmpl LITERAL(-1), %ecx
+ jne args_ok
+ xorl %edx, %edx
+ ret // eax already holds min int
+END_FUNCTION art_quick_idivmod_from_code
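The check_arg2 path exists because hardware idiv raises a divide error on INT_MIN / -1 (the true quotient, 2^31, does not fit in 32 bits), while Java defines that case as INT_MIN with remainder 0, which is exactly what the early return produces. The same guard in C++:

    // Why art_quick_idivmod_from_code special-cases 0x80000000: idiv faults on
    // INT_MIN / -1, while Java defines the result as INT_MIN with remainder 0.
    #include <climits>
    #include <cstdio>

    static int SafeDiv(int a, int b) {
      if (a == INT_MIN && b == -1) return INT_MIN;  // eax already holds min int, edx = 0
      return a / b;                                 // plain idiv otherwise
    }

    int main() {
      printf("%d\n", SafeDiv(INT_MIN, -1));  // -2147483648, no fault
      printf("%d\n", SafeDiv(7, 2));         // 3
      return 0;
    }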
+
+DEFINE_FUNCTION art_quick_ldiv_from_code
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass arg4 b.hi
+ PUSH edx // pass arg3 b.lo
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(artLdivFromCode) // (jlong a, jlong b)
+ addl LITERAL(28), %esp // pop arguments
+ .cfi_adjust_cfa_offset -28
+ ret
+END_FUNCTION art_quick_ldiv_from_code
+
+DEFINE_FUNCTION art_quick_ldivmod_from_code
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass arg4 b.hi
+ PUSH edx // pass arg3 b.lo
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(artLdivmodFromCode) // (jlong a, jlong b)
+ addl LITERAL(28), %esp // pop arguments
+ .cfi_adjust_cfa_offset -28
+ ret
+END_FUNCTION art_quick_ldivmod_from_code
+
+DEFINE_FUNCTION art_quick_lmul_from_code
+ imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx)
+ imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx)
+ mul %edx // edx:eax = a.lo(eax) * b.lo(edx)
+ add %ebx, %ecx
+ add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi)
+ ret
+END_FUNCTION art_quick_lmul_from_code
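The three multiply instructions above build a 64x64-to-64 product from 32-bit pieces: the full low-by-low product, plus the two cross terms folded into the high word; the high-by-high term only affects bits at or above 64 and is dropped. The same arithmetic written out in C++:

    // The decomposition behind art_quick_lmul_from_code:
    // a*b mod 2^64 = a.lo*b.lo + ((a.lo*b.hi + a.hi*b.lo) << 32).
    #include <cstdint>
    #include <cstdio>

    static uint64_t LongMul(uint64_t a, uint64_t b) {
      uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
      uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
      uint64_t low = (uint64_t)a_lo * b_lo;        // mul: full product in edx:eax
      uint32_t cross = a_lo * b_hi + a_hi * b_lo;  // the two imuls plus the add
      return low + ((uint64_t)cross << 32);        // fold cross terms into the high word
    }

    int main() {
      uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;
      printf("%d\n", LongMul(a, b) == a * b);  // prints 1
      return 0;
    }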
+
+DEFINE_FUNCTION art_quick_lshl_from_code
+ // ecx:eax << edx
+ xchg %edx, %ecx
+ shld %cl,%eax,%edx
+ shl %cl,%eax
+ test LITERAL(32), %cl
+ jz 1f
+ mov %eax, %edx
+ xor %eax, %eax
+1:
+ ret
+END_FUNCTION art_quick_lshl_from_code
+
+DEFINE_FUNCTION art_quick_lshr_from_code
+ // ecx:eax >> edx
+ xchg %edx, %ecx
+ shrd %cl,%edx,%eax
+ sar %cl,%edx
+ test LITERAL(32),%cl
+ jz 1f
+ mov %edx, %eax
+ sar LITERAL(31), %edx
+1:
+ ret
+END_FUNCTION art_quick_lshr_from_code
+
+DEFINE_FUNCTION art_quick_lushr_from_code
+ // ecx:eax >>> edx
+ xchg %edx, %ecx
+ shrd %cl,%edx,%eax
+ shr %cl,%edx
+ test LITERAL(32),%cl
+ jz 1f
+ mov %edx, %eax
+ xor %edx, %edx
+1:
+ ret
+END_FUNCTION art_quick_lushr_from_code
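All three long-shift stubs share one shape: a double-precision shift (shld/shrd) paired with a single shift covers counts 0 to 31, and because the hardware only honors the low five bits of cl, the explicit "test 32" fixup handles counts of 32 to 63. A C++ rendering of the unsigned right shift, assuming the count has already been masked to 0-63:

    // Shape of art_quick_lushr_from_code: shrd/shr for counts 0-31, then the
    // count-and-32 fixup that the masked cl cannot express.
    #include <cstdint>
    #include <cstdio>

    static uint64_t UnsignedShr64(uint64_t v, uint32_t count) {
      uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
      uint32_t c = count & 31;  // what shrd/shr actually see in cl
      uint32_t new_lo = (c != 0) ? ((lo >> c) | (hi << (32 - c))) : lo;  // shrd %cl,%edx,%eax
      uint32_t new_hi = hi >> c;                                         // shr %cl,%edx
      if ((count & 32) != 0) {  // test LITERAL(32),%cl
        new_lo = new_hi;        // mov %edx, %eax
        new_hi = 0;             // xor %edx, %edx
      }
      return ((uint64_t)new_hi << 32) | new_lo;
    }

    int main() {
      uint64_t v = 0xdeadbeefcafebabeULL;
      printf("%d %d\n", UnsignedShr64(v, 7) == (v >> 7), UnsignedShr64(v, 40) == (v >> 40));
      return 0;
    }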
+
+DEFINE_FUNCTION art_quick_set32_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ subl LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ mov 32(%ebx), %ebx // get referrer
+ PUSH ebx // pass referrer
+ PUSH edx // pass new_val
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set32_instance_from_code
+
+DEFINE_FUNCTION art_quick_set64_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ subl LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH esp // pass SP-8
+ addl LITERAL(8), (%esp) // fix SP on stack by adding 8
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ebx // pass high half of new_val
+ PUSH edx // pass low half of new_val
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set64_instance_from_code
+
+DEFINE_FUNCTION art_quick_set_obj_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ subl LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ mov 32(%ebx), %ebx // get referrer
+ PUSH ebx // pass referrer
+ PUSH edx // pass new_val
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set_obj_instance_from_code
+
+DEFINE_FUNCTION art_quick_get32_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ mov 32(%esp), %edx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass referrer
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get32_instance_from_code
+
+DEFINE_FUNCTION art_quick_get64_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ mov 32(%esp), %edx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass referrer
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get64_instance_from_code
+
+DEFINE_FUNCTION art_quick_get_obj_instance_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ mov 32(%esp), %edx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass referrer
+ PUSH ecx // pass object
+ PUSH eax // pass field_idx
+ call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get_obj_instance_from_code
+
+DEFINE_FUNCTION art_quick_set32_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ mov 32(%esp), %edx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass referrer
+ PUSH ecx // pass new_val
+ PUSH eax // pass field_idx
+ call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set32_static_from_code
+
+DEFINE_FUNCTION art_quick_set64_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ subl LITERAL(8), %esp // alignment padding
+ .cfi_adjust_cfa_offset 8
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ mov 32(%ebx), %ebx // get referrer
+ PUSH edx // pass high half of new_val
+ PUSH ecx // pass low half of new_val
+ PUSH ebx // pass referrer
+ PUSH eax // pass field_idx
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set64_static_from_code
+
+DEFINE_FUNCTION art_quick_set_obj_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %ebx // remember SP
+ mov 32(%esp), %edx // get referrer
+ subl LITERAL(12), %esp // alignment padding
+ .cfi_adjust_cfa_offset 12
+ PUSH ebx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // pass referrer
+ PUSH ecx // pass new_val
+ PUSH eax // pass field_idx
+ call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ addl LITERAL(32), %esp // pop arguments
+ .cfi_adjust_cfa_offset -32
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_EAX_ZERO // return or deliver exception
+END_FUNCTION art_quick_set_obj_static_from_code
+
+DEFINE_FUNCTION art_quick_get32_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ mov 32(%esp), %ecx // get referrer
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass referrer
+ PUSH eax // pass field_idx
+ call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ addl LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get32_static_from_code
+
+DEFINE_FUNCTION art_quick_get64_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ mov 32(%esp), %ecx // get referrer
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass referrer
+ PUSH eax // pass field_idx
+ call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ addl LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get64_static_from_code
+
+DEFINE_FUNCTION art_quick_get_obj_static_from_code
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ mov %esp, %edx // remember SP
+ mov 32(%esp), %ecx // get referrer
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass referrer
+ PUSH eax // pass field_idx
+ call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
+ addl LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_get_obj_static_from_code
+
+DEFINE_FUNCTION art_portable_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
+ PUSH esp // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass receiver
+ PUSH eax // pass proxy method
+ call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movd %eax, %xmm0 // place return value also into floating point return value
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+ addl LITERAL(44), %esp // pop arguments
+ .cfi_adjust_cfa_offset -44
+ ret
+END_FUNCTION art_portable_proxy_invoke_handler
+
+DEFINE_FUNCTION art_quick_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
+ PUSH esp // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass receiver
+ PUSH eax // pass proxy method
+ call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movd %eax, %xmm0 // place return value also into floating point return value
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+ addl LITERAL(44), %esp // pop arguments
+ .cfi_adjust_cfa_offset -44
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_proxy_invoke_handler
+
+DEFINE_FUNCTION art_quick_interpreter_entry
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame
+ mov %esp, %edx // remember SP
+ PUSH eax // alignment padding
+ PUSH edx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH eax // pass method
+ call SYMBOL(artInterpreterEntry) // (method, Thread*, SP)
+ movd %eax, %xmm0 // place return value also into floating point return value
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+ addl LITERAL(44), %esp // pop arguments
+ .cfi_adjust_cfa_offset -44
+ RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
+END_FUNCTION art_quick_interpreter_entry
+
+ /*
+ * Routine that intercepts method calls and returns.
+ */
+DEFINE_FUNCTION art_quick_instrumentation_entry_from_code
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ movl %esp, %edx // Save SP.
+ PUSH eax // Save eax which will be clobbered by the callee-save method.
+ subl LITERAL(8), %esp // Align stack.
+ .cfi_adjust_cfa_offset 8
+ pushl 40(%esp) // Pass LR.
+ .cfi_adjust_cfa_offset 4
+ PUSH edx // Pass SP.
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // Pass receiver.
+ PUSH eax // Pass Method*.
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ addl LITERAL(28), %esp // Pop arguments up to saved Method*.
+ movl 28(%esp), %edi // Restore edi.
+ movl %eax, 28(%esp) // Place code* over edi, just under return pc.
+ movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp)
+ // Place instrumentation exit as return pc.
+ movl (%esp), %eax // Restore eax.
+ movl 8(%esp), %ecx // Restore ecx.
+ movl 12(%esp), %edx // Restore edx.
+ movl 16(%esp), %ebx // Restore ebx.
+ movl 20(%esp), %ebp // Restore ebp.
+ movl 24(%esp), %esi // Restore esi.
+ addl LITERAL(28), %esp // Wind stack back up to code*.
+ ret // Call method (and pop).
+END_FUNCTION art_quick_instrumentation_entry_from_code
+
+DEFINE_FUNCTION art_quick_instrumentation_exit_from_code
+ pushl LITERAL(0) // Push a fake return PC as there will be none on the stack.
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ mov %esp, %ecx // Remember SP
+ subl LITERAL(8), %esp // Save float return value.
+ .cfi_adjust_cfa_offset 8
+ movd %xmm0, (%esp)
+ PUSH edx // Save gpr return value.
+ PUSH eax
+ subl LITERAL(8), %esp // Align stack
+ .cfi_adjust_cfa_offset 8
+ movd %xmm0, (%esp)
+ subl LITERAL(8), %esp // Pass float return value.
+ .cfi_adjust_cfa_offset 8
+ movd %xmm0, (%esp)
+ PUSH edx // Pass gpr return value.
+ PUSH eax
+ PUSH ecx // Pass SP.
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
+ mov %eax, %ecx // Move returned link register.
+ addl LITERAL(32), %esp // Pop arguments.
+ .cfi_adjust_cfa_offset -32
+ movl %edx, %ebx // Move returned link register for deopt
+ // (ebx is pretending to be our LR).
+ POP eax // Restore gpr return value.
+ POP edx
+ movd (%esp), %xmm0 // Restore fpr return value.
+ addl LITERAL(8), %esp
+ .cfi_adjust_cfa_offset -8
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+ addl LITERAL(4), %esp // Remove fake return pc.
+ jmp *%ecx // Return.
+END_FUNCTION art_quick_instrumentation_exit_from_code
+
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+DEFINE_FUNCTION art_quick_deoptimize
+ pushl %ebx // Fake that we were called.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ mov %esp, %ecx // Remember SP.
+ subl LITERAL(8), %esp // Align stack.
+ .cfi_adjust_cfa_offset 8
+ PUSH ecx // Pass SP.
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ int3 // Unreachable.
+END_FUNCTION art_quick_deoptimize
+
+ /*
+ * Portable abstract method error stub. method* is at %esp + 4 on entry.
+ */
+DEFINE_FUNCTION art_portable_abstract_method_error_stub
+ PUSH ebp
+ movl %esp, %ebp // Remember SP.
+ .cfi_def_cfa_register ebp
+ subl LITERAL(12), %esp // Align stack.
+ PUSH esp // Pass sp (not used).
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ pushl 8(%ebp) // Pass Method*.
+ call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
+ leave // Restore the stack and %ebp.
+ .cfi_def_cfa esp, 4
+ .cfi_restore ebp
+ ret // Return to caller to handle pending exception.
+END_FUNCTION art_portable_abstract_method_error_stub
+
+ /*
+ * Quick abstract method error stub. %eax contains method* on entry.
+ */
+DEFINE_FUNCTION art_quick_abstract_method_error_stub
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ movl %esp, %ecx // Remember SP.
+ PUSH eax // Align frame.
+ PUSH ecx // Pass SP for Method*.
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ .cfi_adjust_cfa_offset 4
+ PUSH eax // Pass Method*.
+ call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
+ int3 // Unreachable.
+END_FUNCTION art_quick_abstract_method_error_stub
+
+ /*
+ * JNI dlsym lookup stub: finds the native code for the current JNI method.
+ */
+DEFINE_FUNCTION art_jni_dlsym_lookup_stub
+ subl LITERAL(8), %esp // align stack
+ .cfi_adjust_cfa_offset 8
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artFindNativeMethod) // (Thread*)
+ addl LITERAL(12), %esp // restore the stack
+ .cfi_adjust_cfa_offset -12
+ cmpl LITERAL(0), %eax // check if returned method code is null
+ je no_native_code_found // if null, jump to return to handle
+ jmp *%eax // otherwise, tail call to intended method
+no_native_code_found:
+ ret
+END_FUNCTION art_jni_dlsym_lookup_stub
+
+ /*
+ * String's indexOf.
+ *
+ * On entry:
+ * eax: string object (known non-null)
+ * ecx: char to match (known <= 0xFFFF)
+ * edx: Starting offset in string data
+ */
+DEFINE_FUNCTION art_quick_indexof
+ PUSH edi // push callee save reg
+ mov STRING_COUNT_OFFSET(%eax), %ebx
+ mov STRING_VALUE_OFFSET(%eax), %edi
+ mov STRING_OFFSET_OFFSET(%eax), %eax
+ testl %edx, %edx // check if start < 0
+ jl clamp_min
+clamp_done:
+ cmpl %ebx, %edx // check if start >= count
+ jge not_found
+ lea STRING_DATA_OFFSET(%edi, %eax, 2), %edi // build a pointer to the start of string data
+ mov %edi, %eax // save a copy in eax to later compute result
+ lea (%edi, %edx, 2), %edi // build pointer to start of data to compare
+ subl %edx, %ebx // compute iteration count
+ /*
+ * At this point we have:
+ * eax: original start of string data
+ * ecx: char to compare
+ * ebx: length to compare
+ * edi: start of data to test
+ */
+ mov %eax, %edx
+ mov %ecx, %eax // put char to match in %eax
+ mov %ebx, %ecx // put length to compare in %ecx
+ repne scasw // find %ax, starting at [%edi], up to length %ecx
+ jne not_found
+ subl %edx, %edi
+ sar LITERAL(1), %edi
+ decl %edi // index = ((curr_ptr - orig_ptr) / 2) - 1
+ mov %edi, %eax
+ POP edi // pop callee save reg
+ ret
+ .balign 16
+not_found:
+ mov LITERAL(-1), %eax // return -1 (not found)
+ POP edi // pop callee save reg
+ ret
+clamp_min:
+ xor %edx, %edx // clamp start to 0
+ jmp clamp_done
+END_FUNCTION art_quick_indexof
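The intrinsic above implements String.indexOf(char, int): a negative start is clamped to 0, the backing char array is scanned with repne scasw, and -1 comes back when no match is found. Its observable behavior matches this C++ reference:

    // C++ reference for the behavior of art_quick_indexof.
    #include <cstdint>

    static int32_t IndexOf(const uint16_t* chars, int32_t count, uint16_t ch, int32_t start) {
      if (start < 0) start = 0;                  // clamp_min
      for (int32_t i = start; i < count; i++) {  // repne scasw
        if (chars[i] == ch) return i;
      }
      return -1;                                 // not_found
    }

    int main() {
      const uint16_t s[] = { 'h', 'e', 'l', 'l', 'o' };
      return IndexOf(s, 5, 'l', -3) == 2 ? 0 : 1;  // exits 0 on success
    }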
+
+ /*
+ * String's compareTo.
+ *
+ * On entry:
+ * eax: this string object (known non-null)
+ * ecx: comp string object (known non-null)
+ */
+DEFINE_FUNCTION art_quick_string_compareto
+ PUSH esi // push callee save reg
+ PUSH edi // push callee save reg
+ mov STRING_COUNT_OFFSET(%eax), %edx
+ mov STRING_COUNT_OFFSET(%ecx), %ebx
+ mov STRING_VALUE_OFFSET(%eax), %esi
+ mov STRING_VALUE_OFFSET(%ecx), %edi
+ mov STRING_OFFSET_OFFSET(%eax), %eax
+ mov STRING_OFFSET_OFFSET(%ecx), %ecx
+ /* Build pointers to the start of string data */
+ lea STRING_DATA_OFFSET(%esi, %eax, 2), %esi
+ lea STRING_DATA_OFFSET(%edi, %ecx, 2), %edi
+ /* Calculate min length and count diff */
+ mov %edx, %ecx
+ mov %edx, %eax
+ subl %ebx, %eax
+ cmovg %ebx, %ecx
+ /*
+ * At this point we have:
+ * eax: value to return if the strings match over the compared length
+ * ecx: minimum among the lengths of the two strings
+ * esi: pointer to this string data
+ * edi: pointer to comp string data
+ */
+ repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
+ jne not_equal
+ POP edi // pop callee save reg
+ POP esi // pop callee save reg
+ ret
+ .balign 16
+not_equal:
+ movzwl -2(%esi), %eax // get last compared char from this string
+ movzwl -2(%edi), %ecx // get last compared char from comp string
+ subl %ecx, %eax // return the difference
+ POP edi // pop callee save reg
+ POP esi // pop callee save reg
+ ret
+END_FUNCTION art_quick_string_compareto
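+
+ /*
+ * Worked examples (illustrative only): "abc" vs "abd" stops at the third
+ * char and returns 'c' - 'd' = -1; "abc" vs "ab" matches over the compared
+ * length and returns the count difference 3 - 2 = 1, as String.compareTo
+ * requires.
+ */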
+
+MACRO1(UNIMPLEMENTED,name)
+ .globl VAR(name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+ int3
+END_MACRO
+
+ // TODO: implement these!
+UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/oat/utils/arm/assembler_arm.cc b/runtime/oat/utils/arm/assembler_arm.cc
new file mode 100644
index 0000000..960a60d
--- /dev/null
+++ b/runtime/oat/utils/arm/assembler_arm.cc
@@ -0,0 +1,1895 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm.h"
+
+#include "base/logging.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Instruction encoding bits.
+enum {
+ H = 1 << 5, // halfword (or byte)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B0 = 1,
+ B1 = 1 << 1,
+ B2 = 1 << 2,
+ B3 = 1 << 3,
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B10 = 1 << 10,
+ B11 = 1 << 11,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B17 = 1 << 17,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks.
+ RdMask = 15 << 12, // in str instruction
+ CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+
+ // ldrex/strex register field encodings.
+ kLdExRnShift = 16,
+ kLdExRtShift = 12,
+ kStrExRnShift = 16,
+ kStrExRdShift = 12,
+ kStrExRtShift = 0,
+};
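+
+// Worked example (a sketch for illustration only): "add r0, r1, r2" under the
+// AL condition is composed from these fields as
+//   (AL << kConditionShift) | (ADD << kOpcodeShift) |
+//   (R1 << kRnShift) | (R0 << kRdShift) | R2
+// which yields 0xE0810002. EmitType01() below performs this composition.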
+
+
+static const char* kRegisterNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= R0 && rhs <= PC) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
+ if (rhs >= S0 && rhs < kNumberOfSRegisters) {
+ os << "s" << static_cast<int>(rhs);
+ } else {
+ os << "SRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+ if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+ os << "d" << static_cast<int>(rhs);
+ } else {
+ os << "DRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+
+static const char* kConditionNames[] = {
+ "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
+ "LE", "AL",
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
+ if (rhs >= EQ && rhs <= AL) {
+ os << kConditionNames[rhs];
+ } else {
+ os << "Condition[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+void ArmAssembler::Emit(int32_t value) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<int32_t>(value);
+}
+
+
+void ArmAssembler::EmitType01(Condition cond,
+ int type,
+ Opcode opcode,
+ int set_cc,
+ Register rn,
+ Register rd,
+ ShifterOperand so) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ type << kTypeShift |
+ static_cast<int32_t>(opcode) << kOpcodeShift |
+ set_cc << kSShift |
+ static_cast<int32_t>(rn) << kRnShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ 5 << kTypeShift |
+ (link ? 1 : 0) << kLinkShift;
+ Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
+}
+
+
+void ArmAssembler::EmitMemOp(Condition cond,
+ bool load,
+ bool byte,
+ Register rd,
+ Address ad) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 |
+ (load ? L : 0) |
+ (byte ? B : 0) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ ad.encoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
+ int32_t mode,
+ Register rd,
+ Address ad) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B22 |
+ mode |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ ad.encoding3();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMultiMemOp(Condition cond,
+ BlockAddressMode am,
+ bool load,
+ Register base,
+ RegList regs) {
+ CHECK_NE(base, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 |
+ am |
+ (load ? L : 0) |
+ (static_cast<int32_t>(base) << kRnShift) |
+ regs;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftImmediate(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so) {
+ CHECK_NE(cond, kNoCondition);
+ CHECK_EQ(so.type(), 1U);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ static_cast<int32_t>(MOV) << kOpcodeShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding() << kShiftImmShift |
+ static_cast<int32_t>(opcode) << kShiftShift |
+ static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftRegister(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so) {
+ CHECK_NE(cond, kNoCondition);
+ CHECK_EQ(so.type(), 0U);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ static_cast<int32_t>(MOV) << kOpcodeShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ so.encoding() << kShiftRegisterShift |
+ static_cast<int32_t>(opcode) << kShiftShift |
+ B4 |
+ static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
+ if (label->IsBound()) {
+ EmitType5(cond, label->Position() - buffer_.Size(), link);
+ } else {
+ int position = buffer_.Size();
+ // Use the offset field of the branch instruction for linking the sites.
+ EmitType5(cond, label->position_, link);
+ label->LinkTo(position);
+ }
+}
+
+void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), AND, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
+ CHECK_NE(rn, PC); // Reserve tst pc instruction for exception handler marker.
+ EmitType01(cond, so.type(), TST, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
+ CHECK_NE(rn, PC); // Reserve teq pc instruction for exception handler marker.
+ EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::orr(Register rd, Register rn,
+ ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::orrs(Register rd, Register rn,
+ ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
+ Condition cond) {
+ EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
+ EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_NE(rd, PC);
+ CHECK_NE(rm, PC);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B22 | B21 | (0xf << 16) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ (0xf << 8) | B4 | static_cast<int32_t>(rm);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ B25 | B24 | ((imm16 >> 12) << 16) |
+ static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+ B25 | B24 | B22 | ((imm16 >> 12) << 16) |
+ static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
+ Register rd, Register rn,
+ Register rm, Register rs) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(rs, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = opcode |
+ (static_cast<int32_t>(cond) << kConditionShift) |
+ (static_cast<int32_t>(rn) << kRnShift) |
+ (static_cast<int32_t>(rd) << kRdShift) |
+ (static_cast<int32_t>(rs) << kRsShift) |
+ B7 | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
+ EmitMulOp(cond, 0, R0, rd, rn, rm);
+}
+
+
+void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
+ Condition cond) {
+ // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+ EmitMulOp(cond, B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
+ Condition cond) {
+ // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+ EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
+ Register rm, Condition cond) {
+ // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
+ EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
+}
+
+
+void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, true, false, rd, ad);
+}
+
+
+void ArmAssembler::str(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, false, false, rd, ad);
+}
+
+
+void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, true, true, rd, ad);
+}
+
+
+void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
+ EmitMemOp(cond, false, true, rd, ad);
+}
+
+
+void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
+ EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
+ CHECK_EQ(rd % 2, 0);
+ EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
+ CHECK_EQ(rd % 2, 0);
+ EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldm(BlockAddressMode am,
+ Register base,
+ RegList regs,
+ Condition cond) {
+ EmitMultiMemOp(cond, am, true, base, regs);
+}
+
+
+void ArmAssembler::stm(BlockAddressMode am,
+ Register base,
+ RegList regs,
+ Condition cond) {
+ EmitMultiMemOp(cond, am, false, base, regs);
+}
+
+
+void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 |
+ B23 |
+ L |
+ (static_cast<int32_t>(rn) << kLdExRnShift) |
+ (static_cast<int32_t>(rt) << kLdExRtShift) |
+ B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::strex(Register rd,
+ Register rt,
+ Register rn,
+ Condition cond) {
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 |
+ B23 |
+ (static_cast<int32_t>(rn) << kStrExRnShift) |
+ (static_cast<int32_t>(rd) << kStrExRdShift) |
+ B11 | B10 | B9 | B8 | B7 | B4 |
+ (static_cast<int32_t>(rt) << kStrExRtShift);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::clrex() {
+ int32_t encoding = (kSpecialCondition << kConditionShift) |
+ B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::nop(Condition cond) {
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B25 | B24 | B21 | (0xf << 12);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B20 |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
+ Condition cond) {
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(sm, S31);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
+ Condition cond) {
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(sm, S31);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(rt, rt2);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 | B20 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+ ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
+ Condition cond) {
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+ ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
+ Condition cond) {
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rt, SP);
+ CHECK_NE(rt, PC);
+ CHECK_NE(rt2, kNoRegister);
+ CHECK_NE(rt2, SP);
+ CHECK_NE(rt2, PC);
+ CHECK_NE(rt, rt2);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B22 | B20 |
+ (static_cast<int32_t>(rt2)*B16) |
+ (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+ ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 | B20 |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ B11 | B9 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
+ CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ B11 | B9 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 | B20 |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ B11 | B9 | B8 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
+ CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B24 |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ B11 | B9 | B8 | ad.vencoding();
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
+ SRegister sd, SRegister sn, SRegister sm) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(sn, kNoSRegister);
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sn) >> 1)*B16) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ ((static_cast<int32_t>(sn) & 1)*B7) |
+ ((static_cast<int32_t>(sm) & 1)*B5) |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
+ DRegister dd, DRegister dn, DRegister dm) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(dn, kNoDRegister);
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | B8 | opcode |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dn) & 0xf)*B16) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ ((static_cast<int32_t>(dn) >> 4)*B7) |
+ ((static_cast<int32_t>(dm) >> 4)*B5) |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
+}
+
+
+bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
+ uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
+ if (((imm32 & ((1 << 19) - 1)) == 0) &&
+ ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
+ (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
+ uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
+ ((imm32 >> 19) & ((1 << 6) -1));
+ EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
+ sd, S0, S0);
+ return true;
+ }
+ return false;
+}
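+
+// For illustration (a sketch, not exhaustive): 1.0f is 0x3F800000, whose low
+// 19 bits are zero and whose exponent field passes the test above, so it
+// encodes as imm8 = 0x70; a value such as 1.3f (0x3FA66666) fails the
+// low-bits test and makes this method return false.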
+
+
+bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
+ uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
+ if (((imm64 & ((1LL << 48) - 1)) == 0) &&
+ ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
+ (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
+ uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
+ ((imm64 >> 48) & ((1 << 6) -1));
+ EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
+ dd, D0, D0);
+ return true;
+ }
+ return false;
+}
+
+
+void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21 | B20, sd, sn, sm);
+}
+
+
+void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21 | B20, dd, dn, dm);
+}
+
+
+void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B21, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B21, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, 0, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, 0, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
+ Condition cond) {
+ EmitVFPsss(cond, B23, sd, sn, sm);
+}
+
+
+void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
+ Condition cond) {
+ EmitVFPddd(cond, B23, dd, dn, dm);
+}
+
+
+void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
+}
+
+void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
+ SRegister sd, DRegister dm) {
+ CHECK_NE(sd, kNoSRegister);
+ CHECK_NE(dm, kNoDRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(sd) & 1)*B22) |
+ ((static_cast<int32_t>(sd) >> 1)*B12) |
+ ((static_cast<int32_t>(dm) >> 4)*B5) |
+ (static_cast<int32_t>(dm) & 0xf);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
+ DRegister dd, SRegister sm) {
+ CHECK_NE(dd, kNoDRegister);
+ CHECK_NE(sm, kNoSRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B11 | B9 | opcode |
+ ((static_cast<int32_t>(dd) >> 4)*B22) |
+ ((static_cast<int32_t>(dd) & 0xf)*B12) |
+ ((static_cast<int32_t>(sm) & 1)*B5) |
+ (static_cast<int32_t>(sm) >> 1);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
+ EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
+ EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
+ EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
+}
+
+
+void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
+ EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
+}
+
+
+void ArmAssembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+ (static_cast<int32_t>(PC)*B12) |
+ B11 | B9 | B4;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::svc(uint32_t imm24) {
+ CHECK(IsUint(24, imm24)) << imm24;
+ int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
+ Emit(encoding);
+}
+
+
+void ArmAssembler::bkpt(uint16_t imm16) {
+ int32_t encoding = (AL << kConditionShift) | B24 | B21 |
+ ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
+ Emit(encoding);
+}
+
+
+void ArmAssembler::b(Label* label, Condition cond) {
+ EmitBranch(cond, label, false);
+}
+
+
+void ArmAssembler::bl(Label* label, Condition cond) {
+ EmitBranch(cond, label, true);
+}
+
+
+void ArmAssembler::blx(Register rm, Condition cond) {
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B21 | (0xfff << 8) | B5 | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+void ArmAssembler::bx(Register rm, Condition cond) {
+ CHECK_NE(rm, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B24 | B21 | (0xfff << 8) | B4 |
+ (static_cast<int32_t>(rm) << kRmShift);
+ Emit(encoding);
+}
+
+void ArmAssembler::MarkExceptionHandler(Label* label) {
+ EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
+ Label l;
+ b(&l);
+ EmitBranch(AL, label, false);
+ Bind(&l);
+}
+
+
+void ArmAssembler::Bind(Label* label) {
+ CHECK(!label->IsBound());
+ int bound_pc = buffer_.Size();
+ while (label->IsLinked()) {
+ int32_t position = label->Position();
+ int32_t next = buffer_.Load<int32_t>(position);
+ int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
+ buffer_.Store<int32_t>(position, encoded);
+ label->position_ = ArmAssembler::DecodeBranchOffset(next);
+ }
+ label->BindTo(bound_pc);
+}
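+
+// Unresolved forward branches to the same label form a chain threaded through
+// their offset fields: EmitBranch() stores the previous link position in each
+// newly emitted branch, and Bind() above walks those positions, patching in
+// the real offset to bound_pc and following the chain via DecodeBranchOffset().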
+
+
+void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
+ // TODO: Consider using movw ip, <16 bits>.
+ while (!IsUint(8, data)) {
+ tst(R0, ShifterOperand(data & 0xFF), VS);
+ data >>= 8;
+ }
+ tst(R0, ShifterOperand(data), MI);
+}
+
+
+int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
+ // The offset is off by 8 due to the way the ARM CPUs read PC.
+ offset -= 8;
+ CHECK_ALIGNED(offset, 4);
+ CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+ // Properly preserve only the bits supported in the instruction.
+ offset >>= 2;
+ offset &= kBranchOffsetMask;
+ return (inst & ~kBranchOffsetMask) | offset;
+}
+
+
+int ArmAssembler::DecodeBranchOffset(int32_t inst) {
+ // Sign-extend, left-shift by 2, then add 8.
+ return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
+}
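+
+// Round-trip example (illustrative): for a branch 16 bytes forward,
+// EncodeBranchOffset(16, inst) stores (16 - 8) >> 2 = 2 in the low 24 bits,
+// and DecodeBranchOffset() recovers it as ((2 << 8) >> 6) + 8 = 16.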
+
+void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+
+void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ if (value == 0) {
+ if (rd != rn) {
+ mov(rd, ShifterOperand(rn), cond);
+ }
+ return;
+ }
+ // We prefer the shorter code sequence over always selecting add for
+ // positive values and sub for negative ones, even though the latter would
+ // slightly improve the readability of the generated code for some constants.
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ add(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ sub(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ add(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ sub(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ add(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ adds(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ subs(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ adds(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ subs(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ adds(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ mov(rd, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(rd, shifter_op, cond);
+ } else {
+ movw(rd, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(rd, value_high, cond);
+ }
+ }
+}
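+
+// For example (a sketch): LoadImmediate(R0, 0x12345678) can encode neither the
+// value nor its complement as a shifter operand, so it falls through to the
+// two-instruction pair:
+//   movw r0, #0x5678
+//   movt r0, #0x1234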
+
+
+bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadWordPair:
+ return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ case kLoadSWord:
+ case kLoadDWord:
+ return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ return false;
+ }
+}
+
+
+bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreWordPair:
+ return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ case kStoreByte:
+ case kStoreWord:
+ return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ case kStoreSWord:
+ case kStoreDWord:
+ return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ return false;
+ }
+}
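+
+// The limits above follow the ARM addressing modes: mode 3 (halfword, signed
+// byte, word pair) takes an 8-bit offset (|offset| <= 255), mode 2 (word,
+// unsigned byte) a 12-bit offset (|offset| <= 4095), and VFP loads/stores an
+// 8-bit offset scaled by 4 (|offset| <= 1020), which IsAbsoluteUint(10, offset)
+// approximates.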
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset is true.
+void ArmAssembler::LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(type, offset)) {
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(type, offset));
+ switch (type) {
+ case kLoadSignedByte:
+ ldrsb(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedByte:
+ ldrb(reg, Address(base, offset), cond);
+ break;
+ case kLoadSignedHalfword:
+ ldrsh(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedHalfword:
+ ldrh(reg, Address(base, offset), cond);
+ break;
+ case kLoadWord:
+ ldr(reg, Address(base, offset), cond);
+ break;
+ case kLoadWordPair:
+ ldrd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
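+
+// For example (a sketch): LoadFromOffset(kLoadWord, R0, SP, 4096) exceeds the
+// 12-bit mode 2 range, so the offset is materialized in IP first:
+//   mov ip, #4096
+//   add ip, ip, sp
+//   ldr r0, [ip, #0]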
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset is true, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadSFromOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
+ vldrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset is true, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadDFromOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
+ vldrd(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset is true.
+void ArmAssembler::StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(type, offset)) {
+ CHECK(reg != IP);
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(type, offset));
+ switch (type) {
+ case kStoreByte:
+ strb(reg, Address(base, offset), cond);
+ break;
+ case kStoreHalfword:
+ strh(reg, Address(base, offset), cond);
+ break;
+ case kStoreWord:
+ str(reg, Address(base, offset), cond);
+ break;
+ case kStoreWordPair:
+ strd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset is true, as expected by JIT::GuardedStoreToOffset.
+void ArmAssembler::StoreSToOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
+ vstrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset is true, as expected by JIT::GuardedStoreSToOffset.
+void ArmAssembler::StoreDToOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
+ CHECK_NE(base, IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
+ vstrd(reg, Address(base, offset), cond);
+}
+
+void ArmAssembler::Push(Register rd, Condition cond) {
+ str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
+}
+
+void ArmAssembler::Pop(Register rd, Condition cond) {
+ ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
+}
+
+void ArmAssembler::PushList(RegList regs, Condition cond) {
+ stm(DB_W, SP, regs, cond);
+}
+
+void ArmAssembler::PopList(RegList regs, Condition cond) {
+ ldm(IA_W, SP, regs, cond);
+}
+
+void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
+ if (rd != rm) {
+ mov(rd, ShifterOperand(rm), cond);
+ }
+}
+
+void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Lsl if no shift is wanted.
+ mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
+}
+
+void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Lsr if no shift is wanted.
+ if (shift_imm == 32) shift_imm = 0; // Comply with UAL syntax.
+ mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
+}
+
+void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Asr if no shift is wanted.
+ if (shift_imm == 32) shift_imm = 0; // Comply with UAL syntax.
+ mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
+}
+
+void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
+ Condition cond) {
+ CHECK_NE(shift_imm, 0u); // Do not use Ror for a rotate of 0; use Rrx instead.
+ mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
+}
+
+void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
+ mov(rd, ShifterOperand(rm, ROR, 0), cond);
+}
+
+void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+ // Push callee saves and link register.
+ RegList push_list = 1 << LR;
+ size_t pushed_values = 1;
+ for (size_t i = 0; i < callee_save_regs.size(); i++) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ push_list |= 1 << reg;
+ pushed_values++;
+ }
+ PushList(push_list);
+
+ // Increase frame to required size.
+ CHECK_GT(frame_size, pushed_values * kPointerSize); // Must leave at least enough space for the Method*.
+ size_t adjust = frame_size - (pushed_values * kPointerSize);
+ IncreaseFrameSize(adjust);
+
+ // Write out Method*.
+ StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Register reg = entry_spills.at(i).AsArm().AsCoreRegister();
+ StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+ }
+}
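+
+// Frame layout example (illustrative, assuming frame_size == 32 and callee
+// saves {R5, R6}): PushList emits push {r5, r6, lr} (12 bytes), the frame is
+// grown by the remaining 20 bytes, and the Method* in R0 is stored at [sp].
+// Entry spills land above the frame, at [sp, #36] onwards.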
+
+void ArmAssembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ // Compute callee saves to pop and PC
+ RegList pop_list = 1 << PC;
+ size_t pop_values = 1;
+ for (size_t i = 0; i < callee_save_regs.size(); i++) {
+ Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+ pop_list |= 1 << reg;
+ pop_values++;
+ }
+
+ // Decrease frame to start of callee saves
+ CHECK_GT(frame_size, pop_values * kPointerSize);
+ size_t adjust = frame_size - (pop_values * kPointerSize);
+ DecreaseFrameSize(adjust);
+
+ // Pop callee saves and PC
+ PopList(pop_list);
+}
+
+void ArmAssembler::IncreaseFrameSize(size_t adjust) {
+ AddConstant(SP, -adjust);
+}
+
+void ArmAssembler::DecreaseFrameSize(size_t adjust) {
+ AddConstant(SP, adjust);
+}
+
+void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ ArmManagedRegister src = msrc.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+ SP, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+ FrameOffset in_off, ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
+}
+
+static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
+ Register src_register, int32_t src_offset, size_t size) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsSRegister()) {
+ assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
+
+void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
+}
+
+void ArmAssembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
+ return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
+}
+
+void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ vmovd(dst.AsDRegister(), src.AsDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ vmovs(dst.AsSRegister(), src.AsSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ } else {
+ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ }
+ }
+ }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+ ManagedRegister /*scratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+
+void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
+ CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
+#if ANDROID_SMP != 0
+#if defined(__ARM_HAVE_DMB)
+ int32_t encoding = 0xf57ff05f; // dmb sy (full-system data memory barrier)
+ Emit(encoding);
+#elif defined(__ARM_HAVE_LDREX_STREX)
+ LoadImmediate(R12, 0);
+ int32_t encoding = 0xee07cfba; // mcr p15, 0, r12, c7, c10, 5
+ Emit(encoding);
+#else
+ LoadImmediate(R12, 0xffff0fa0); // kuser_memory_barrier
+ blx(R12);
+#endif
+#endif
+}
+
+void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+ FrameOffset sirt_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
+ // the address in the SIRT holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+ SP, sirt_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ }
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ } else {
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ }
+}
+
+void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
+ FrameOffset sirt_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+ sirt_offset.Int32Value());
+ // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
+ // the address in the SIRT holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ } else {
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ }
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ CHECK(in_reg.IsCoreRegister()) << in_reg;
+ cmp(in_reg.AsCoreRegister(), ShifterOperand(0)); // Set flags before the conditional ops below.
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ }
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+ in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ base.AsCoreRegister(), offset.Int32Value());
+ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(FrameOffset base, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, base.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(), offset.Int32Value());
+ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
+ mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister /*scratch*/) {
+ StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ TR, Thread::ExceptionOffset().Int32Value());
+ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ b(slow->Entry(), NE);
+}
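+
+// Roughly, ExceptionPoll emits the following sequence (a sketch; the offset
+// and scratch register depend on the caller):
+//   ldr <scratch>, [tr, #exception_offset]  ; load Thread::exception_
+//   cmp <scratch>, #0
+//   bne <slow path entry>                   ; deliver the pending exception
+// The queued slow path itself is emitted later, out of line.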
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+ ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception object as argument
+ // Don't care about preserving R0 as this call won't return
+ __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+ // Set up call to Thread::Current()->pDeliverException
+ __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+ __ blx(R12);
+ // Call never returns
+ __ bkpt(0);
+#undef __
+}
+
+} // namespace arm
+} // namespace art
diff --git a/runtime/oat/utils/arm/assembler_arm.h b/runtime/oat/utils/arm/assembler_arm.h
new file mode 100644
index 0000000..06e0a55
--- /dev/null
+++ b/runtime/oat/utils/arm/assembler_arm.h
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+#define ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "oat/utils/arm/managed_register_arm.h"
+#include "oat/utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+class ShifterOperand {
+ public:
+ // Data-processing operands - Uninitialized
+ ShifterOperand() {
+ type_ = -1;
+ }
+
+ // Data-processing operands - Immediate
+ explicit ShifterOperand(uint32_t immediate) {
+ CHECK(immediate < (1 << kImmed8Bits));
+ type_ = 1;
+ encoding_ = immediate;
+ }
+
+ // Data-processing operands - Rotated immediate
+ ShifterOperand(uint32_t rotate, uint32_t immed8) {
+ CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
+ type_ = 1;
+ encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
+ }
+
+ // Data-processing operands - Register
+ explicit ShifterOperand(Register rm) {
+ type_ = 0;
+ encoding_ = static_cast<uint32_t>(rm);
+ }
+
+ // Data-processing operands - Logical shift/rotate by immediate
+ ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) {
+ CHECK(shift_imm < (1 << kShiftImmBits));
+ type_ = 0;
+ encoding_ = shift_imm << kShiftImmShift |
+ static_cast<uint32_t>(shift) << kShiftShift |
+ static_cast<uint32_t>(rm);
+ }
+
+ // Data-processing operands - Logical shift/rotate by register
+ ShifterOperand(Register rm, Shift shift, Register rs) {
+ type_ = 0;
+ encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
+ static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
+ static_cast<uint32_t>(rm);
+ }
+
+ static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) {
+ // Avoid the more expensive test for frequent small immediate values.
+ if (immediate < (1 << kImmed8Bits)) {
+ shifter_op->type_ = 1;
+ shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
+ return true;
+ }
+    // Note that immediate must be unsigned for the test to work correctly.
+    // Start at rot = 1: rot = 0 is covered by the test above, and shifting a
+    // 32-bit value right by (32 - 2*rot) == 32 bits is undefined in C++.
+    for (int rot = 1; rot < 16; rot++) {
+      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+ if (imm8 < (1 << kImmed8Bits)) {
+ shifter_op->type_ = 1;
+ shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
+ return true;
+ }
+ }
+ return false;
+ }
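+
+  // Worked example (illustrative): 0xff000000 does not fit in 8 bits, but
+  // rotating it left by 2*rot = 8 bits (rot = 4) yields 0xff, so CanHold()
+  // produces rotate = 4, immed8 = 0xff; the hardware then recovers the value
+  // as 0xff ROR 8 == 0xff000000.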
+
+ private:
+ bool is_valid() const { return (type_ == 0) || (type_ == 1); }
+
+ uint32_t type() const {
+ CHECK(is_valid());
+ return type_;
+ }
+
+ uint32_t encoding() const {
+ CHECK(is_valid());
+ return encoding_;
+ }
+
+ uint32_t type_; // Encodes the type field (bits 27-25) in the instruction.
+ uint32_t encoding_;
+
+ friend class ArmAssembler;
+#ifdef SOURCE_ASSEMBLER_SUPPORT
+ friend class BinaryAssembler;
+#endif
+};
+
+
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadWordPair,
+ kLoadSWord,
+ kLoadDWord
+};
+
+
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreWordPair,
+ kStoreSWord,
+ kStoreDWord
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddressMode {
+ // bit encoding P U W
+ DA = (0|0|0) << 21, // decrement after
+ IA = (0|4|0) << 21, // increment after
+ DB = (8|0|0) << 21, // decrement before
+ IB = (8|4|0) << 21, // increment before
+ DA_W = (0|0|1) << 21, // decrement after with writeback to base
+ IA_W = (0|4|1) << 21, // increment after with writeback to base
+ DB_W = (8|0|1) << 21, // decrement before with writeback to base
+ IB_W = (8|4|1) << 21 // increment before with writeback to base
+};
+
+
+class Address {
+ public:
+ // Memory operand addressing mode
+ enum Mode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (w/o writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (w/o writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+ };
+
+ explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
+ CHECK(IsAbsoluteUint(12, offset));
+ if (offset < 0) {
+ encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign.
+ } else {
+ encoding_ = am | offset;
+ }
+ encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
+ }
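+
+  // For example (illustrative): Address(SP, -4) stores the magnitude 4 and
+  // clears the U (add offset) bit, so the operand still denotes [SP, #-4].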
+
+ static bool CanHoldLoadOffset(LoadOperandType type, int offset);
+ static bool CanHoldStoreOffset(StoreOperandType type, int offset);
+
+ private:
+ uint32_t encoding() const { return encoding_; }
+
+ // Encoding for addressing mode 3.
+ uint32_t encoding3() const {
+ const uint32_t offset_mask = (1 << 12) - 1;
+ uint32_t offset = encoding_ & offset_mask;
+ CHECK_LT(offset, 256u);
+ return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
+ }
+
+ // Encoding for vfp load/store addressing.
+ uint32_t vencoding() const {
+ const uint32_t offset_mask = (1 << 12) - 1;
+ uint32_t offset = encoding_ & offset_mask;
+ CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
+    CHECK_ALIGNED(offset, 4);  // Offset is scaled by 4 in the encoding below.
+ int mode = encoding_ & ((8|4|1) << 21);
+ CHECK((mode == Offset) || (mode == NegOffset));
+ uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
+ if (mode == Offset) {
+ vencoding |= 1 << 23;
+ }
+ return vencoding;
+ }
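+
+  // For example (illustrative): Address(R0, 8) in Offset mode keeps Rn == R0,
+  // sets the U bit (1 << 23) and stores the scaled offset 8 >> 2 == 2, since
+  // VFP load/store offsets are expressed in words.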
+
+ uint32_t encoding_;
+
+ friend class ArmAssembler;
+};
+
+
+class ArmAssembler : public Assembler {
+ public:
+ ArmAssembler() {}
+ virtual ~ArmAssembler() {}
+
+ // Data-processing instructions.
+ void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void tst(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void teq(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void cmp(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void cmn(Register rn, ShifterOperand so, Condition cond = AL);
+
+ void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+ void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void mov(Register rd, ShifterOperand so, Condition cond = AL);
+ void movs(Register rd, ShifterOperand so, Condition cond = AL);
+
+ void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+ void mvn(Register rd, ShifterOperand so, Condition cond = AL);
+ void mvns(Register rd, ShifterOperand so, Condition cond = AL);
+
+ // Miscellaneous data-processing instructions.
+ void clz(Register rd, Register rm, Condition cond = AL);
+ void movw(Register rd, uint16_t imm16, Condition cond = AL);
+ void movt(Register rd, uint16_t imm16, Condition cond = AL);
+
+ // Multiply instructions.
+ void mul(Register rd, Register rn, Register rm, Condition cond = AL);
+ void mla(Register rd, Register rn, Register rm, Register ra,
+ Condition cond = AL);
+ void mls(Register rd, Register rn, Register rm, Register ra,
+ Condition cond = AL);
+ void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+ Condition cond = AL);
+
+ // Load/store instructions.
+ void ldr(Register rd, Address ad, Condition cond = AL);
+ void str(Register rd, Address ad, Condition cond = AL);
+
+ void ldrb(Register rd, Address ad, Condition cond = AL);
+ void strb(Register rd, Address ad, Condition cond = AL);
+
+ void ldrh(Register rd, Address ad, Condition cond = AL);
+ void strh(Register rd, Address ad, Condition cond = AL);
+
+ void ldrsb(Register rd, Address ad, Condition cond = AL);
+ void ldrsh(Register rd, Address ad, Condition cond = AL);
+
+ void ldrd(Register rd, Address ad, Condition cond = AL);
+ void strd(Register rd, Address ad, Condition cond = AL);
+
+ void ldm(BlockAddressMode am, Register base,
+ RegList regs, Condition cond = AL);
+ void stm(BlockAddressMode am, Register base,
+ RegList regs, Condition cond = AL);
+
+ void ldrex(Register rd, Register rn, Condition cond = AL);
+ void strex(Register rd, Register rt, Register rn, Condition cond = AL);
+
+ // Miscellaneous instructions.
+ void clrex();
+ void nop(Condition cond = AL);
+
+ // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
+ void bkpt(uint16_t imm16);
+ void svc(uint32_t imm24);
+
+ // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
+ void vmovsr(SRegister sn, Register rt, Condition cond = AL);
+ void vmovrs(Register rt, SRegister sn, Condition cond = AL);
+ void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
+ void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
+ void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
+ void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
+ void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
+ void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
+
+ // Returns false if the immediate cannot be encoded.
+ bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
+ bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
+
+ void vldrs(SRegister sd, Address ad, Condition cond = AL);
+ void vstrs(SRegister sd, Address ad, Condition cond = AL);
+ void vldrd(DRegister dd, Address ad, Condition cond = AL);
+ void vstrd(DRegister dd, Address ad, Condition cond = AL);
+
+ void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+ void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+ void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+
+ void vabss(SRegister sd, SRegister sm, Condition cond = AL);
+ void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
+ void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
+ void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
+
+ void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
+ void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
+ void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
+ void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
+
+ void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
+ void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
+ void vcmpsz(SRegister sd, Condition cond = AL);
+ void vcmpdz(DRegister dd, Condition cond = AL);
+ void vmstat(Condition cond = AL); // VMRS APSR_nzcv, FPSCR
+
+ // Branch instructions.
+ void b(Label* label, Condition cond = AL);
+ void bl(Label* label, Condition cond = AL);
+ void blx(Register rm, Condition cond = AL);
+ void bx(Register rm, Condition cond = AL);
+
+ // Macros.
+ // Add signed constant value to rd. May clobber IP.
+ void AddConstant(Register rd, int32_t value, Condition cond = AL);
+ void AddConstant(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+ void AddConstantSetFlags(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+ void AddConstantWithCarry(Register rd, Register rn, int32_t value,
+ Condition cond = AL);
+
+ // Load and Store. May clobber IP.
+ void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
+ void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
+ void LoadDImmediate(DRegister dd, double value,
+ Register scratch, Condition cond = AL);
+ void MarkExceptionHandler(Label* label);
+ void LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void LoadSFromOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreSToOffset(SRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void LoadDFromOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+ void StoreDToOffset(DRegister reg,
+ Register base,
+ int32_t offset,
+ Condition cond = AL);
+
+ void Push(Register rd, Condition cond = AL);
+ void Pop(Register rd, Condition cond = AL);
+
+ void PushList(RegList regs, Condition cond = AL);
+ void PopList(RegList regs, Condition cond = AL);
+
+ void Mov(Register rd, Register rm, Condition cond = AL);
+
+ // Convenience shift instructions. Use mov instruction with shifter operand
+ // for variants setting the status flags or using a register shift count.
+ void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+ void Rrx(Register rd, Register rm, Condition cond = AL);
+
+  // Encode a 32-bit constant in tst instructions, which affect only the flags.
+  void EncodeUint32InTstInstructions(uint32_t data);
+  // ... and decode it from a pc pointing to the start of those instructions.
+ static uint32_t DecodeUint32FromTstInstructions(uword pc);
+ static bool IsInstructionForExceptionHandling(uword pc);
+
+ // Emit data (e.g. encoded instruction or immediate) to the
+ // instruction stream.
+ void Emit(int32_t value);
+ void Bind(Label* label);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills);
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs);
+
+ virtual void IncreaseFrameSize(size_t adjust);
+ virtual void DecreaseFrameSize(size_t adjust);
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+ FrameOffset in_off, ManagedRegister scratch);
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs);
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+ Offset offs);
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest,
+ ThreadOffset offs);
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void MemoryBarrier(ManagedRegister scratch);
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr);
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the SIRT entry to see if the value is
+ // NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load it into dst.
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+ virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+ void EmitType01(Condition cond,
+ int type,
+ Opcode opcode,
+ int set_cc,
+ Register rn,
+ Register rd,
+ ShifterOperand so);
+
+ void EmitType5(Condition cond, int offset, bool link);
+
+ void EmitMemOp(Condition cond,
+ bool load,
+ bool byte,
+ Register rd,
+ Address ad);
+
+ void EmitMemOpAddressMode3(Condition cond,
+ int32_t mode,
+ Register rd,
+ Address ad);
+
+ void EmitMultiMemOp(Condition cond,
+ BlockAddressMode am,
+ bool load,
+ Register base,
+ RegList regs);
+
+ void EmitShiftImmediate(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so);
+
+ void EmitShiftRegister(Condition cond,
+ Shift opcode,
+ Register rd,
+ Register rm,
+ ShifterOperand so);
+
+ void EmitMulOp(Condition cond,
+ int32_t opcode,
+ Register rd,
+ Register rn,
+ Register rm,
+ Register rs);
+
+ void EmitVFPsss(Condition cond,
+ int32_t opcode,
+ SRegister sd,
+ SRegister sn,
+ SRegister sm);
+
+ void EmitVFPddd(Condition cond,
+ int32_t opcode,
+ DRegister dd,
+ DRegister dn,
+ DRegister dm);
+
+ void EmitVFPsd(Condition cond,
+ int32_t opcode,
+ SRegister sd,
+ DRegister dm);
+
+ void EmitVFPds(Condition cond,
+ int32_t opcode,
+ DRegister dd,
+ SRegister sm);
+
+ void EmitBranch(Condition cond, Label* label, bool link);
+ static int32_t EncodeBranchOffset(int offset, int32_t inst);
+ static int DecodeBranchOffset(int32_t inst);
+ int32_t EncodeTstOffset(int offset, int32_t inst);
+ int DecodeTstOffset(int32_t inst);
+
+  // Comparator that orders two registers by ascending register number.
+ static int RegisterCompare(const Register* reg1, const Register* reg2) {
+ return *reg1 - *reg2;
+ }
+};
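+
+// A minimal usage sketch (illustrative only; buffer finalization and the
+// surrounding driver are omitted):
+//   ArmAssembler assembler;
+//   assembler.LoadImmediate(R0, 42);     // r0 = 42
+//   assembler.AddConstant(R0, R0, 1);    // r0 = r0 + 1
+//   assembler.bx(LR);                    // return to the caller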
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class ArmExceptionSlowPath : public SlowPath {
+ public:
+ explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+ virtual void Emit(Assembler *sp_asm);
+ private:
+ const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
diff --git a/runtime/oat/utils/arm/managed_register_arm.cc b/runtime/oat/utils/arm/managed_register_arm.cc
new file mode 100644
index 0000000..57c2305
--- /dev/null
+++ b/runtime/oat/utils/arm/managed_register_arm.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_arm.h"
+
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// We need all registers for caching of locals.
+// Registers R9..R15 are reserved.
+static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
+static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+ kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Returns true if this managed-register overlaps the other managed-register.
+bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ if (Equals(other)) return true;
+ if (IsRegisterPair()) {
+ Register low = AsRegisterPairLow();
+ Register high = AsRegisterPairHigh();
+ return ArmManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+ ArmManagedRegister::FromCoreRegister(high).Overlaps(other);
+ }
+ if (IsOverlappingDRegister()) {
+ if (other.IsDRegister()) return Equals(other);
+ if (other.IsSRegister()) {
+ SRegister low = AsOverlappingDRegisterLow();
+ SRegister high = AsOverlappingDRegisterHigh();
+ SRegister other_sreg = other.AsSRegister();
+ return (low == other_sreg) || (high == other_sreg);
+ }
+ return false;
+ }
+ if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+ return other.Overlaps(*this);
+ }
+ return false;
+}
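+
+// For example (illustrative): FromDRegister(D0) overlaps FromSRegister(S0)
+// and FromSRegister(S1), and FromRegisterPair(R1_R2) overlaps both
+// FromCoreRegister(R1) and FromCoreRegister(R2), while two distinct core
+// registers never overlap.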
+
+
+int ArmManagedRegister::AllocIdLow() const {
+ CHECK(IsOverlappingDRegister() || IsRegisterPair());
+ const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ int low;
+ if (r < kNumberOfOverlappingDRegIds) {
+ CHECK(IsOverlappingDRegister());
+ low = (r * 2) + kNumberOfCoreRegIds; // Return a SRegister.
+ } else {
+ CHECK(IsRegisterPair());
+ low = (r - kNumberOfDRegIds) * 2; // Return a Register.
+ if (low > 6) {
+      // We didn't get a pair higher than R6_R7, so this must be the Dalvik
+      // special case pair R1_R2, whose low register is R1.
+ low = 1;
+ }
+ }
+ return low;
+}
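+
+// Worked example (illustrative, using the id layout from the header): for
+// R1_R2, r = kNumberOfDRegIds + 4, so low = (r - kNumberOfDRegIds) * 2 = 8;
+// since 8 > 6 the Dalvik special case applies and AllocIdLow() returns 1
+// (R1), with AllocIdHigh() returning 2 (R2).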
+
+
+int ArmManagedRegister::AllocIdHigh() const {
+ return AllocIdLow() + 1;
+}
+
+
+void ArmManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsCoreRegister()) {
+ os << "Core: " << static_cast<int>(AsCoreRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << static_cast<int>(AsRegisterPairLow()) << ", "
+ << static_cast<int>(AsRegisterPairHigh());
+ } else if (IsSRegister()) {
+ os << "SRegister: " << static_cast<int>(AsSRegister());
+ } else if (IsDRegister()) {
+ os << "DRegister: " << static_cast<int>(AsDRegister());
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& r) {
+ os << ArmManagedRegister::FromRegisterPair(r);
+ return os;
+}
+
+} // namespace arm
+} // namespace art
diff --git a/runtime/oat/utils/arm/managed_register_arm.h b/runtime/oat/utils/arm/managed_register_arm.h
new file mode 100644
index 0000000..b069f6d
--- /dev/null
+++ b/runtime/oat/utils/arm/managed_register_arm.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#define ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+
+#include "base/logging.h"
+#include "constants_arm.h"
+#include "oat/utils/managed_register.h"
+
+namespace art {
+namespace arm {
+
+// Values for register pairs.
+enum RegisterPair {
+ R0_R1 = 0,
+ R2_R3 = 1,
+ R4_R5 = 2,
+ R6_R7 = 3,
+ R1_R2 = 4, // Dalvik style passing
+ kNumberOfRegisterPairs = 5,
+ kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfSRegIds = kNumberOfSRegisters;
+const int kNumberOfSAllocIds = kNumberOfSRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds +
+ kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+ kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..D[ double precision VFP registers (enum DRegister)
+// [D..P[ core register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCoreRegIds
+// S = R + kNumberOfSRegIds
+// D = S + kNumberOfDRegIds
+// P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..N[ non-overlapping double precision VFP registers (16-31 in enum
+// DRegister, VFPv3-D32 only)
+// where
+// R = kNumberOfCoreAllocIds
+// S = R + kNumberOfSAllocIds
+// N = S + kNumberOfDAllocIds
+
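+// As a concrete example (assuming 16 core registers, 32 S registers and the
+// 32 D registers of VFPv3-D32, 16 of which overlap the S registers):
+// R = 16, S = 48, D = 80 and P = 85, while the allocation ids run 0..15
+// (core), 16..47 (S) and 48..63 (the non-overlapping D16-D31).
+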
+
+// An instance of class 'ManagedRegister' represents a single ARM register or a
+// pair of core ARM registers (enum RegisterPair). A single register is either a
+// core register (enum Register), a VFP single precision register
+// (enum SRegister), or a VFP double precision register (enum DRegister).
+// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class ArmManagedRegister : public ManagedRegister {
+ public:
+ Register AsCoreRegister() const {
+ CHECK(IsCoreRegister());
+ return static_cast<Register>(id_);
+ }
+
+ SRegister AsSRegister() const {
+ CHECK(IsSRegister());
+ return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
+ }
+
+ DRegister AsDRegister() const {
+ CHECK(IsDRegister());
+ return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+ }
+
+ SRegister AsOverlappingDRegisterLow() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<SRegister>(d_reg * 2);
+ }
+
+ SRegister AsOverlappingDRegisterHigh() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<SRegister>(d_reg * 2 + 1);
+ }
+
+ RegisterPair AsRegisterPair() const {
+ CHECK(IsRegisterPair());
+ Register reg_low = AsRegisterPairLow();
+ if (reg_low == R1) {
+ return R1_R2;
+ } else {
+ return static_cast<RegisterPair>(reg_low / 2);
+ }
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows us to use AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCoreRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows us to use AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCoreRegister();
+ }
+
+ bool IsCoreRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+ }
+
+ bool IsSRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCoreRegIds;
+ return (0 <= test) && (test < kNumberOfSRegIds);
+ }
+
+ bool IsDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ return (0 <= test) && (test < kNumberOfDRegIds);
+ }
+
+ // Returns true if this DRegister overlaps SRegisters.
+ bool IsOverlappingDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test =
+ id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ bool IsSameType(ArmManagedRegister test) const {
+ CHECK(IsValidManagedRegister() && test.IsValidManagedRegister());
+ return
+ (IsCoreRegister() && test.IsCoreRegister()) ||
+ (IsSRegister() && test.IsSRegister()) ||
+ (IsDRegister() && test.IsDRegister()) ||
+ (IsRegisterPair() && test.IsRegisterPair());
+ }
+
+
+ // Returns true if the two managed-registers ('this' and 'other') overlap.
+ // Either managed-register may be the NoRegister. If both are the NoRegister
+ // then false is returned.
+ bool Overlaps(const ArmManagedRegister& other) const;
+
+ void Print(std::ostream& os) const;
+
+ static ArmManagedRegister FromCoreRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static ArmManagedRegister FromSRegister(SRegister r) {
+ CHECK_NE(r, kNoSRegister);
+ return FromRegId(r + kNumberOfCoreRegIds);
+ }
+
+ static ArmManagedRegister FromDRegister(DRegister r) {
+ CHECK_NE(r, kNoDRegister);
+ return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds));
+ }
+
+ static ArmManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCoreRegIds +
+ kNumberOfSRegIds + kNumberOfDRegIds));
+ }
+
+ // Return a RegisterPair consisting of Register r_low and r_low + 1.
+ static ArmManagedRegister FromCoreRegisterPair(Register r_low) {
+    if (r_low != R1) {  // Not the Dalvik special case.
+ CHECK_NE(r_low, kNoRegister);
+ CHECK_EQ(0, (r_low % 2));
+ const int r = r_low / 2;
+ CHECK_LT(r, kNumberOfPairRegIds);
+ return FromRegisterPair(static_cast<RegisterPair>(r));
+ } else {
+ return FromRegisterPair(R1_R2);
+ }
+ }
+
+ // Return a DRegister overlapping SRegister r_low and r_low + 1.
+ static ArmManagedRegister FromSRegisterPair(SRegister r_low) {
+ CHECK_NE(r_low, kNoSRegister);
+ CHECK_EQ(0, (r_low % 2));
+ const int r = r_low / 2;
+ CHECK_LT(r, kNumberOfOverlappingDRegIds);
+ return FromDRegister(static_cast<DRegister>(r));
+ }
+
+ private:
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() &&
+ !IsOverlappingDRegister() && !IsRegisterPair());
+ int r = id_;
+ if ((kNumberOfDAllocIds > 0) && IsDRegister()) { // VFPv3-D32 only.
+ r -= kNumberOfOverlappingDRegIds;
+ }
+ CHECK_LT(r, kNumberOfAllocIds);
+ return r;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ friend class ManagedRegister;
+
+ explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+ static ArmManagedRegister FromRegId(int reg_id) {
+ ArmManagedRegister reg(reg_id);
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg);
+
+} // namespace arm
+
+inline arm::ArmManagedRegister ManagedRegister::AsArm() const {
+ arm::ArmManagedRegister reg(id_);
+ CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+ return reg;
+}
+
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
diff --git a/runtime/oat/utils/arm/managed_register_arm_test.cc b/runtime/oat/utils/arm/managed_register_arm_test.cc
new file mode 100644
index 0000000..f5d4cc0
--- /dev/null
+++ b/runtime/oat/utils/arm/managed_register_arm_test.cc
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_arm.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace arm {
+
+TEST(ArmManagedRegister, NoRegister) {
+ ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(ArmManagedRegister, CoreRegister) {
+ ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R0, reg.AsCoreRegister());
+
+ reg = ArmManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R1, reg.AsCoreRegister());
+
+ reg = ArmManagedRegister::FromCoreRegister(R8);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R8, reg.AsCoreRegister());
+
+ reg = ArmManagedRegister::FromCoreRegister(R15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R15, reg.AsCoreRegister());
+}
+
+
+TEST(ArmManagedRegister, SRegister) {
+ ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S0, reg.AsSRegister());
+
+ reg = ArmManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S1, reg.AsSRegister());
+
+ reg = ArmManagedRegister::FromSRegister(S3);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S3, reg.AsSRegister());
+
+ reg = ArmManagedRegister::FromSRegister(S15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S15, reg.AsSRegister());
+
+ reg = ArmManagedRegister::FromSRegister(S30);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S30, reg.AsSRegister());
+
+ reg = ArmManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S31, reg.AsSRegister());
+}
+
+
+TEST(ArmManagedRegister, DRegister) {
+ ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D0, reg.AsDRegister());
+ EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S1, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0)));
+
+ reg = ArmManagedRegister::FromDRegister(D1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D1, reg.AsDRegister());
+ EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2)));
+
+ reg = ArmManagedRegister::FromDRegister(D6);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D6, reg.AsDRegister());
+ EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12)));
+
+ reg = ArmManagedRegister::FromDRegister(D14);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D14, reg.AsDRegister());
+ EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28)));
+
+ reg = ArmManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D15, reg.AsDRegister());
+ EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30)));
+
+#ifdef VFPv3_D32
+ reg = ArmManagedRegister::FromDRegister(D16);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D16, reg.AsDRegister());
+
+ reg = ArmManagedRegister::FromDRegister(D18);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D18, reg.AsDRegister());
+
+ reg = ArmManagedRegister::FromDRegister(D30);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D30, reg.AsDRegister());
+
+ reg = ArmManagedRegister::FromDRegister(D31);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D31, reg.AsDRegister());
+#endif // VFPv3_D32
+}
+
+
+TEST(ArmManagedRegister, Pair) {
+ ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R0_R1, reg.AsRegisterPair());
+ EXPECT_EQ(R0, reg.AsRegisterPairLow());
+ EXPECT_EQ(R1, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0)));
+
+ reg = ArmManagedRegister::FromRegisterPair(R1_R2);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R1_R2, reg.AsRegisterPair());
+ EXPECT_EQ(R1, reg.AsRegisterPairLow());
+ EXPECT_EQ(R2, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1)));
+
+ reg = ArmManagedRegister::FromRegisterPair(R2_R3);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R2_R3, reg.AsRegisterPair());
+ EXPECT_EQ(R2, reg.AsRegisterPairLow());
+ EXPECT_EQ(R3, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2)));
+
+ reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R4_R5, reg.AsRegisterPair());
+ EXPECT_EQ(R4, reg.AsRegisterPairLow());
+ EXPECT_EQ(R5, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4)));
+
+ reg = ArmManagedRegister::FromRegisterPair(R6_R7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R6_R7, reg.AsRegisterPair());
+ EXPECT_EQ(R6, reg.AsRegisterPairLow());
+ EXPECT_EQ(R7, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6)));
+}
+
+
+TEST(ArmManagedRegister, Equals) {
+ ManagedRegister no_reg = ManagedRegister::NoRegister();
+ EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8);
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+#ifdef VFPv3_D32
+ ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16);
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+ ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30);
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30)));
+ EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D31);
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30)));
+ EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+#endif // VFPv3_D32
+
+ ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3)));
+
+ ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+
+ ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7);
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+ EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+}
+
+
+TEST(ArmManagedRegister, Overlaps) {
+ ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromCoreRegister(R7);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromSRegister(S15);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromDRegister(D7);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+#ifdef VFPv3_D32
+ reg = ArmManagedRegister::FromDRegister(D16);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromDRegister(D31);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+#endif // VFPv3_D32
+
+ reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+}
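+
+// A short note on the aliasing these expectations encode (a sketch, not part
+// of the test itself): on VFP, S[2n] and S[2n+1] occupy the same bits as
+// D[n], so for example:
+//   FromSRegister(S0).Overlaps(FromDRegister(D0))    // true:  D0  == {S0, S1}
+//   FromSRegister(S31).Overlaps(FromDRegister(D15))  // true:  D15 == {S30, S31}
+//   FromSRegister(S0).Overlaps(FromDRegister(D1))    // false: D1  == {S2, S3}
+// The VFPv3_D32-only registers D16-D31 have no S-register aliases, which is
+// why the guarded blocks above never expect an S/D overlap for them.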
+
+} // namespace arm
+} // namespace art
diff --git a/runtime/oat/utils/assembler.cc b/runtime/oat/utils/assembler.cc
new file mode 100644
index 0000000..92ce0b8
--- /dev/null
+++ b/runtime/oat/utils/assembler.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "arm/assembler_arm.h"
+#include "mips/assembler_mips.h"
+#include "x86/assembler_x86.h"
+#include "globals.h"
+#include "memory_region.h"
+
+namespace art {
+
+static byte* NewContents(size_t capacity) {
+ return new byte[capacity];
+}
+
+
+AssemblerBuffer::AssemblerBuffer() {
+ static const size_t kInitialBufferCapacity = 4 * KB;
+ contents_ = NewContents(kInitialBufferCapacity);
+ cursor_ = contents_;
+ limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
+ fixup_ = NULL;
+ slow_path_ = NULL;
+#ifndef NDEBUG
+ has_ensured_capacity_ = false;
+ fixups_processed_ = false;
+#endif
+
+ // Verify internal state.
+ CHECK_EQ(Capacity(), kInitialBufferCapacity);
+ CHECK_EQ(Size(), 0U);
+}
+
+
+AssemblerBuffer::~AssemblerBuffer() {
+ delete[] contents_;
+}
+
+
+void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
+ AssemblerFixup* fixup = fixup_;
+ while (fixup != NULL) {
+ fixup->Process(region, fixup->position());
+ fixup = fixup->previous();
+ }
+}
+
+
+void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
+ // Copy the instructions from the buffer.
+ MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
+ instructions.CopyFrom(0, from);
+ // Process fixups in the instructions.
+ ProcessFixups(instructions);
+#ifndef NDEBUG
+ fixups_processed_ = true;
+#endif
+}
+
+
+void AssemblerBuffer::ExtendCapacity() {
+ size_t old_size = Size();
+ size_t old_capacity = Capacity();
+ size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB);
+
+ // Allocate the new data area and copy contents of the old one to it.
+ byte* new_contents = NewContents(new_capacity);
+ memmove(reinterpret_cast<void*>(new_contents),
+ reinterpret_cast<void*>(contents_),
+ old_size);
+
+ // Compute the relocation delta and switch to the new contents area.
+ ptrdiff_t delta = new_contents - contents_;
+ contents_ = new_contents;
+
+ // Update the cursor and recompute the limit.
+ cursor_ += delta;
+ limit_ = ComputeLimit(new_contents, new_capacity);
+
+ // Verify internal state.
+ CHECK_EQ(Capacity(), new_capacity);
+ CHECK_EQ(Size(), old_size);
+}
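+
+// Growth sketch for the policy above (assuming the 4KB initial capacity):
+// the buffer doubles while small and grows by at most 1MB per extension
+// once large, i.e. 4KB -> 8KB -> ... -> 512KB -> 1MB -> 2MB -> 3MB -> 4MB.
+// std::min picks the doubling term until old_capacity exceeds 1MB, after
+// which the old_capacity + 1MB cap wins.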
+
+
+Assembler* Assembler::Create(InstructionSet instruction_set) {
+ switch (instruction_set) {
+ case kArm:
+ case kThumb2:
+ return new arm::ArmAssembler();
+ case kMips:
+ return new mips::MipsAssembler();
+ case kX86:
+ return new x86::X86Assembler();
+ default:
+ LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+ return NULL;
+ }
+}
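+
+// Typical use of the factory above, as a hedged sketch (mirroring how a
+// compiler driver might use an assembler; names here are illustrative):
+//
+//   UniquePtr<Assembler> assembler(Assembler::Create(kThumb2));
+//   // ... emit instructions via the virtual interface ...
+//   std::vector<uint8_t> managed_code(assembler->CodeSize());
+//   MemoryRegion code(&managed_code[0], managed_code.size());
+//   assembler->FinalizeInstructions(code);  // copies bytes, applies fixups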
+
+} // namespace art
diff --git a/runtime/oat/utils/assembler.h b/runtime/oat/utils/assembler.h
new file mode 100644
index 0000000..cbf145b
--- /dev/null
+++ b/runtime/oat/utils/assembler.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ASSEMBLER_H_
+#define ART_SRC_OAT_UTILS_ASSEMBLER_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "constants_arm.h"
+#include "constants_mips.h"
+#include "constants_x86.h"
+#include "instruction_set.h"
+#include "managed_register.h"
+#include "memory_region.h"
+#include "offsets.h"
+
+namespace art {
+
+class Assembler;
+class AssemblerBuffer;
+class AssemblerFixup;
+
+namespace arm {
+ class ArmAssembler;
+}
+namespace mips {
+ class MipsAssembler;
+}
+namespace x86 {
+ class X86Assembler;
+}
+
+class Label {
+ public:
+ Label() : position_(0) {}
+
+ ~Label() {
+ // Assert if label is being destroyed with unresolved branches pending.
+ CHECK(!IsLinked());
+ }
+
+ // Returns the position for bound and linked labels. Cannot be used
+ // for unused labels.
+ int Position() const {
+ CHECK(!IsUnused());
+ return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
+ }
+
+ int LinkPosition() const {
+ CHECK(IsLinked());
+ return position_ - kWordSize;
+ }
+
+ bool IsBound() const { return position_ < 0; }
+ bool IsUnused() const { return position_ == 0; }
+ bool IsLinked() const { return position_ > 0; }
+
+ private:
+ int position_;
+
+ void Reinitialize() {
+ position_ = 0;
+ }
+
+ void BindTo(int position) {
+ CHECK(!IsBound());
+ position_ = -position - kPointerSize;
+ CHECK(IsBound());
+ }
+
+ void LinkTo(int position) {
+ CHECK(!IsBound());
+ position_ = position + kPointerSize;
+ CHECK(IsLinked());
+ }
+
+ friend class arm::ArmAssembler;
+ friend class mips::MipsAssembler;
+ friend class x86::X86Assembler;
+
+ DISALLOW_COPY_AND_ASSIGN(Label);
+};
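+
+// A minimal sketch of the position encoding above (hypothetical use via a
+// friend assembler): an unused label stores 0; LinkTo(p) stores
+// p + kPointerSize (positive, so IsLinked()); BindTo(p) stores
+// -p - kPointerSize (negative, so IsBound()). The +/-kPointerSize bias keeps
+// position 0 distinguishable from the unused state, and Position() removes
+// the bias again in both cases:
+//
+//   Label label;                     // position_ == 0, IsUnused()
+//   label.LinkTo(12);                // position_ == 12 + kPointerSize
+//   CHECK_EQ(12, label.Position());  // bias removed, IsLinked()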
+
+
+// Assembler fixups are positions in generated code that require processing
+// after the code has been copied to executable memory. This includes building
+// relocation information.
+class AssemblerFixup {
+ public:
+ virtual void Process(const MemoryRegion& region, int position) = 0;
+ virtual ~AssemblerFixup() {}
+
+ private:
+ AssemblerFixup* previous_;
+ int position_;
+
+ AssemblerFixup* previous() const { return previous_; }
+ void set_previous(AssemblerFixup* previous) { previous_ = previous; }
+
+ int position() const { return position_; }
+ void set_position(int position) { position_ = position; }
+
+ friend class AssemblerBuffer;
+};
+
+// Parent of all queued slow paths, emitted during finalization
+class SlowPath {
+ public:
+ SlowPath() : next_(NULL) {}
+ virtual ~SlowPath() {}
+
+ Label* Continuation() { return &continuation_; }
+ Label* Entry() { return &entry_; }
+ // Generate code for slow path
+ virtual void Emit(Assembler *sp_asm) = 0;
+
+ protected:
+ // Entry branched to by fast path
+ Label entry_;
+ // Optional continuation that is branched to at the end of the slow path
+ Label continuation_;
+ // Next in linked list of slow paths
+ SlowPath *next_;
+
+ friend class AssemblerBuffer;
+ DISALLOW_COPY_AND_ASSIGN(SlowPath);
+};
+
+class AssemblerBuffer {
+ public:
+ AssemblerBuffer();
+ ~AssemblerBuffer();
+
+ // Basic support for emitting, loading, and storing.
+ template<typename T> void Emit(T value) {
+ CHECK(HasEnsuredCapacity());
+ *reinterpret_cast<T*>(cursor_) = value;
+ cursor_ += sizeof(T);
+ }
+
+ template<typename T> T Load(size_t position) {
+ CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+ return *reinterpret_cast<T*>(contents_ + position);
+ }
+
+ template<typename T> void Store(size_t position, T value) {
+ CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+ *reinterpret_cast<T*>(contents_ + position) = value;
+ }
+
+ // Emit a fixup at the current location.
+ void EmitFixup(AssemblerFixup* fixup) {
+ fixup->set_previous(fixup_);
+ fixup->set_position(Size());
+ fixup_ = fixup;
+ }
+
+ void EnqueueSlowPath(SlowPath* slowpath) {
+ if (slow_path_ == NULL) {
+ slow_path_ = slowpath;
+ } else {
+ SlowPath* cur = slow_path_;
+ for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ cur->next_ = slowpath;
+ }
+ }
+
+ void EmitSlowPaths(Assembler* sp_asm) {
+ SlowPath* cur = slow_path_;
+ SlowPath* next = NULL;
+ slow_path_ = NULL;
+ for ( ; cur != NULL ; cur = next) {
+ cur->Emit(sp_asm);
+ next = cur->next_;
+ delete cur;
+ }
+ }
+
+ // Get the size of the emitted code.
+ size_t Size() const {
+ CHECK_GE(cursor_, contents_);
+ return cursor_ - contents_;
+ }
+
+ byte* contents() const { return contents_; }
+
+ // Copy the assembled instructions into the specified memory block
+ // and apply all fixups.
+ void FinalizeInstructions(const MemoryRegion& region);
+
+ // To emit an instruction to the assembler buffer, the EnsureCapacity helper
+ // must be used to guarantee that the underlying data area is big enough to
+ // hold the emitted instruction. Usage:
+ //
+ // AssemblerBuffer buffer;
+ // AssemblerBuffer::EnsureCapacity ensured(&buffer);
+ // ... emit bytes for single instruction ...
+
+#ifndef NDEBUG
+
+ class EnsureCapacity {
+ public:
+ explicit EnsureCapacity(AssemblerBuffer* buffer) {
+ if (buffer->cursor() >= buffer->limit()) {
+ buffer->ExtendCapacity();
+ }
+ // In debug mode, we save the assembler buffer along with the gap
+ // size before we start emitting to the buffer. This allows us to
+ // check that any single generated instruction doesn't overflow the
+ // limit implied by the minimum gap size.
+ buffer_ = buffer;
+ gap_ = ComputeGap();
+ // Make sure that extending the capacity leaves a big enough gap
+ // for any kind of instruction.
+ CHECK_GE(gap_, kMinimumGap);
+ // Mark the buffer as having ensured the capacity.
+ CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest.
+ buffer->has_ensured_capacity_ = true;
+ }
+
+ ~EnsureCapacity() {
+ // Unmark the buffer, so we cannot emit after this.
+ buffer_->has_ensured_capacity_ = false;
+ // Make sure the generated instruction doesn't take up more
+ // space than the minimum gap.
+ int delta = gap_ - ComputeGap();
+ CHECK_LE(delta, kMinimumGap);
+ }
+
+ private:
+ AssemblerBuffer* buffer_;
+ int gap_;
+
+ int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
+ };
+
+ bool has_ensured_capacity_;
+ bool HasEnsuredCapacity() const { return has_ensured_capacity_; }
+
+#else
+
+ class EnsureCapacity {
+ public:
+ explicit EnsureCapacity(AssemblerBuffer* buffer) {
+ if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
+ }
+ };
+
+ // When building the C++ tests, assertion code is enabled. To allow
+ // asserting that the user of the assembler buffer has ensured the
+ // capacity needed for emitting, we add a dummy method in non-debug mode.
+ bool HasEnsuredCapacity() const { return true; }
+
+#endif
+
+ // Returns the position in the instruction stream.
+ int GetPosition() { return cursor_ - contents_; }
+
+ private:
+ // The limit is set to kMinimumGap bytes before the end of the data area.
+ // This leaves enough space for the longest possible instruction and allows
+ // for a single, fast space check per instruction.
+ static const int kMinimumGap = 32;
+
+ byte* contents_;
+ byte* cursor_;
+ byte* limit_;
+ AssemblerFixup* fixup_;
+ bool fixups_processed_;
+
+ // Head of linked list of slow paths
+ SlowPath* slow_path_;
+
+ byte* cursor() const { return cursor_; }
+ byte* limit() const { return limit_; }
+ size_t Capacity() const {
+ CHECK_GE(limit_, contents_);
+ return (limit_ - contents_) + kMinimumGap;
+ }
+
+  // Walk the fixup chain recorded via EmitFixup and apply each fixup against
+  // the final memory region.
+ void ProcessFixups(const MemoryRegion& region);
+
+ // Compute the limit based on the data area and the capacity. See
+ // description of kMinimumGap for the reasoning behind the value.
+ static byte* ComputeLimit(byte* data, size_t capacity) {
+ return data + capacity - kMinimumGap;
+ }
+
+ void ExtendCapacity();
+
+ friend class AssemblerFixup;
+};
+
+class Assembler {
+ public:
+ static Assembler* Create(InstructionSet instruction_set);
+
+ // Emit slow paths queued during assembly
+ void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }
+
+ // Size of generated code
+ size_t CodeSize() const { return buffer_.Size(); }
+
+ // Copy instructions out of assembly buffer into the given region of memory
+ void FinalizeInstructions(const MemoryRegion& region) {
+ buffer_.FinalizeInstructions(region);
+ }
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills) = 0;
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) = 0;
+
+ virtual void IncreaseFrameSize(size_t adjust) = 0;
+ virtual void DecreaseFrameSize(size_t adjust) = 0;
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch) = 0;
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch) = 0;
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+ FrameOffset in_off, ManagedRegister scratch) = 0;
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
+
+ virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0;
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs) = 0;
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+ Offset offs) = 0;
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest,
+ ThreadOffset offs) = 0;
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch) = 0;
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) = 0;
+
+ virtual void MemoryBarrier(ManagedRegister scratch) = 0;
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch) = 0;
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the SIRT entry to see if the value is
+ // NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed) = 0;
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister scratch, bool null_allowed) = 0;
+
+  // src holds a SIRT entry (Object**); load this into dst.
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src) = 0;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
+ virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister scratch) = 0;
+ virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+
+ virtual ~Assembler() {}
+
+ protected:
+ Assembler() : buffer_() {}
+
+ AssemblerBuffer buffer_;
+};
+
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_ASSEMBLER_H_
diff --git a/runtime/oat/utils/managed_register.h b/runtime/oat/utils/managed_register.h
new file mode 100644
index 0000000..a3d5795
--- /dev/null
+++ b/runtime/oat/utils/managed_register.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
+#define ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
+
+namespace art {
+
+namespace arm {
+class ArmManagedRegister;
+}
+namespace mips {
+class MipsManagedRegister;
+}
+namespace x86 {
+class X86ManagedRegister;
+}
+
+class ManagedRegister {
+ public:
+ // ManagedRegister is a value class. There exists no method to change the
+ // internal state. We therefore allow a copy constructor and an
+ // assignment-operator.
+ ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+
+ ManagedRegister& operator=(const ManagedRegister& other) {
+ id_ = other.id_;
+ return *this;
+ }
+
+ arm::ArmManagedRegister AsArm() const;
+ mips::MipsManagedRegister AsMips() const;
+ x86::X86ManagedRegister AsX86() const;
+
+ // It is valid to invoke Equals on and with a NoRegister.
+ bool Equals(const ManagedRegister& other) const {
+ return id_ == other.id_;
+ }
+
+ bool IsNoRegister() const {
+ return id_ == kNoRegister;
+ }
+
+ static ManagedRegister NoRegister() {
+ return ManagedRegister();
+ }
+
+ protected:
+ static const int kNoRegister = -1;
+
+ ManagedRegister() : id_(kNoRegister) { }
+ explicit ManagedRegister(int reg_id) : id_(reg_id) { }
+
+ int id_;
+};
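+
+// Usage sketch for the value-class contract above (illustrative only):
+//
+//   ManagedRegister reg = ManagedRegister::NoRegister();
+//   CHECK(reg.IsNoRegister());
+//   ManagedRegister copy = reg;   // copying is cheap: a single int
+//   CHECK(copy.Equals(reg));      // Equals compares the underlying id_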
+
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
diff --git a/runtime/oat/utils/mips/assembler_mips.cc b/runtime/oat/utils/mips/assembler_mips.cc
new file mode 100644
index 0000000..25ba9b2
--- /dev/null
+++ b/runtime/oat/utils/mips/assembler_mips.cc
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include "base/casts.h"
+#include "memory_region.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace mips {
+#if 0
+class DirectCallRelocation : public AssemblerFixup {
+ public:
+ void Process(const MemoryRegion& region, int position) {
+ // Direct calls are relative to the following instruction on mips.
+ int32_t pointer = region.Load<int32_t>(position);
+ int32_t start = reinterpret_cast<int32_t>(region.start());
+ int32_t delta = start + position + sizeof(int32_t);
+ region.Store<int32_t>(position, pointer - delta);
+ }
+};
+#endif
+
+static const char* kRegisterNames[] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= ZERO && rhs <= RA) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
+ if (rhs >= F0 && rhs < kNumberOfFRegisters) {
+ os << "f" << static_cast<int>(rhs);
+ } else {
+ os << "FRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+ if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+ os << "d" << static_cast<int>(rhs);
+ } else {
+ os << "DRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+void MipsAssembler::Emit(int32_t value) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<int32_t>(value);
+}
+
+void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
+ CHECK_NE(rs, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ CHECK_NE(rd, kNoRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ static_cast<int32_t>(rs) << kRsShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
+ CHECK_NE(rs, kNoRegister);
+ CHECK_NE(rt, kNoRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ static_cast<int32_t>(rs) << kRsShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitJ(int opcode, int address) {
+ int32_t encoding = opcode << kOpcodeShift |
+ address;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) {
+ CHECK_NE(ft, kNoFRegister);
+ CHECK_NE(fs, kNoFRegister);
+ CHECK_NE(fd, kNoFRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<int32_t>(ft) << kFtShift |
+ static_cast<int32_t>(fs) << kFsShift |
+ static_cast<int32_t>(fd) << kFdShift |
+ funct;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) {
+ CHECK_NE(rt, kNoFRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
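+
+// Worked example of the R-type packing above (a sketch): Addu(V0, A0, A1)
+// reaches EmitR(0, A0, A1, V0, 0, 0x21), which packs
+//   opcode rs    rt    rd    shamt funct
+//   000000 00100 00101 00010 00000 100001  == 0x00851021,
+// i.e. the bytes of "addu v0, a0, a1".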
+
+void MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) {
+ int offset;
+ if (label->IsBound()) {
+ offset = label->Position() - buffer_.Size();
+ } else {
+ // Use the offset field of the branch instruction for linking the sites.
+ offset = label->position_;
+ label->LinkTo(buffer_.Size());
+ }
+ if (equal) {
+ Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
+ } else {
+ Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
+ }
+}
+
+void MipsAssembler::EmitJump(Label* label, bool link) {
+ int offset;
+ if (label->IsBound()) {
+ offset = label->Position() - buffer_.Size();
+ } else {
+ // Use the offset field of the jump instruction for linking the sites.
+ offset = label->position_;
+ label->LinkTo(buffer_.Size());
+ }
+ if (link) {
+ Jal((offset >> 2) & kJumpOffsetMask);
+ } else {
+ J((offset >> 2) & kJumpOffsetMask);
+ }
+}
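+
+// Linking sketch for the unbound case in the two emitters above: each
+// unresolved site stores the label's previous raw position in its own offset
+// field, so the unresolved sites form a chain threaded through the
+// instruction stream, with the label always recording the most recent site.
+// Bind() below walks this chain via DecodeBranchOffset and patches each site
+// with the real offset.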
+
+int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
+ CHECK_ALIGNED(offset, 4);
+ CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+ // Properly preserve only the bits supported in the instruction.
+ offset >>= 2;
+ if (is_jump) {
+ offset &= kJumpOffsetMask;
+ return (inst & ~kJumpOffsetMask) | offset;
+ } else {
+ offset &= kBranchOffsetMask;
+ return (inst & ~kBranchOffsetMask) | offset;
+ }
+}
+
+int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
+ // Sign-extend, then left-shift by 2.
+ if (is_jump) {
+ return (((inst & kJumpOffsetMask) << 6) >> 4);
+ } else {
+ return (((inst & kBranchOffsetMask) << 16) >> 14);
+ }
+}
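+
+// Round-trip sketch for the two helpers above: a branch 32 bytes forward
+// stores 32 >> 2 == 8 in the 16-bit offset field; DecodeBranchOffset then
+// computes ((8 << 16) >> 14) == 32, the << 16 / >> 14 pair both
+// sign-extending the field and restoring the byte offset.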
+
+void MipsAssembler::Bind(Label* label, bool is_jump) {
+ CHECK(!label->IsBound());
+ int bound_pc = buffer_.Size();
+ while (label->IsLinked()) {
+ int32_t position = label->Position();
+ int32_t next = buffer_.Load<int32_t>(position);
+ int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
+ int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump);
+ buffer_.Store<int32_t>(position, encoded);
+ label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump);
+ }
+ label->BindTo(bound_pc);
+}
+
+void MipsAssembler::Add(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x20);
+}
+
+void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x21);
+}
+
+void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x8, rs, rt, imm16);
+}
+
+void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x9, rs, rt, imm16);
+}
+
+void MipsAssembler::Sub(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x22);
+}
+
+void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x23);
+}
+
+void MipsAssembler::Mult(Register rs, Register rt) {
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+}
+
+void MipsAssembler::Multu(Register rs, Register rt) {
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+}
+
+void MipsAssembler::Div(Register rs, Register rt) {
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+}
+
+void MipsAssembler::Divu(Register rs, Register rt) {
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+}
+
+void MipsAssembler::And(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x24);
+}
+
+void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0xc, rs, rt, imm16);
+}
+
+void MipsAssembler::Or(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x25);
+}
+
+void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0xd, rs, rt, imm16);
+}
+
+void MipsAssembler::Xor(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x26);
+}
+
+void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0xe, rs, rt, imm16);
+}
+
+void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x27);
+}
+
+void MipsAssembler::Sll(Register rd, Register rs, int shamt) {
+  // Shift-by-immediate instructions encode the source register in the rt
+  // field; the rs field must be zero.
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x00);
+}
+
+void MipsAssembler::Srl(Register rd, Register rs, int shamt) {
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x02);
+}
+
+void MipsAssembler::Sra(Register rd, Register rs, int shamt) {
+  EmitR(0, static_cast<Register>(0), rs, rd, shamt, 0x03);
+}
+
+void MipsAssembler::Sllv(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x04);
+}
+
+void MipsAssembler::Srlv(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x06);
+}
+
+void MipsAssembler::Srav(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x07);
+}
+
+void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x20, rs, rt, imm16);
+}
+
+void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x21, rs, rt, imm16);
+}
+
+void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x23, rs, rt, imm16);
+}
+
+void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x24, rs, rt, imm16);
+}
+
+void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x25, rs, rt, imm16);
+}
+
+void MipsAssembler::Lui(Register rt, uint16_t imm16) {
+ EmitI(0xf, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Mfhi(Register rd) {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
+}
+
+void MipsAssembler::Mflo(Register rd) {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
+}
+
+void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x28, rs, rt, imm16);
+}
+
+void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x29, rs, rt, imm16);
+}
+
+void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x2b, rs, rt, imm16);
+}
+
+void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2a);
+}
+
+void MipsAssembler::Sltu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2b);
+}
+
+void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0xa, rs, rt, imm16);
+}
+
+void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0xb, rs, rt, imm16);
+}
+
+void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x4, rs, rt, imm16);
+ Nop();
+}
+
+void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x5, rs, rt, imm16);
+ Nop();
+}
+
+void MipsAssembler::J(uint32_t address) {
+ EmitJ(0x2, address);
+ Nop();
+}
+
+void MipsAssembler::Jal(uint32_t address) {
+  EmitJ(0x3, address);  // JAL is opcode 0x3; 0x2 (J) would not link RA.
+ Nop();
+}
+
+void MipsAssembler::Jr(Register rs) {
+ EmitR(0, rs, static_cast<Register>(0), static_cast<Register>(0), 0, 0x08);
+ Nop();
+}
+
+void MipsAssembler::Jalr(Register rs) {
+ EmitR(0, rs, static_cast<Register>(0), RA, 0, 0x09);
+ Nop();
+}
+
+void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
+}
+
+void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
+}
+
+void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
+}
+
+void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
+}
+
+void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+ static_cast<FRegister>(fd), 0x0);
+}
+
+void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+ static_cast<FRegister>(fd), 0x1);
+}
+
+void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+ static_cast<FRegister>(fd), 0x2);
+}
+
+void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+ static_cast<FRegister>(fd), 0x3);
+}
+
+void MipsAssembler::MovS(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
+}
+
+void MipsAssembler::MovD(DRegister fd, DRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), static_cast<FRegister>(fs),
+ static_cast<FRegister>(fd), 0x6);
+}
+
+void MipsAssembler::Mfc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mtc1(FRegister ft, Register rs) {
+  // MTC1 encodes the general-purpose source in the rt field and the FPU
+  // destination in the fs field (mirroring Mfc1 above).
+  EmitFR(0x11, 0x04, static_cast<FRegister>(rs), ft, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x31, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x35, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x39, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Break() {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0),
+ static_cast<Register>(0), 0, 0xD);
+}
+
+void MipsAssembler::Nop() {
+ EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
+}
+
+void MipsAssembler::Move(Register rt, Register rs) {
+ EmitI(0x8, rs, rt, 0);
+}
+
+void MipsAssembler::Clear(Register rt) {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rt, 0, 0x20);
+}
+
+void MipsAssembler::Not(Register rt, Register rs) {
+ EmitR(0, static_cast<Register>(0), rs, rt, 0, 0x27);
+}
+
+void MipsAssembler::Mul(Register rd, Register rs, Register rt) {
+ Mult(rs, rt);
+ Mflo(rd);
+}
+
+void MipsAssembler::Div(Register rd, Register rs, Register rt) {
+ Div(rs, rt);
+ Mflo(rd);
+}
+
+void MipsAssembler::Rem(Register rd, Register rs, Register rt) {
+ Div(rs, rt);
+ Mfhi(rd);
+}
+
+void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) {
+  CHECK(IsInt(16, value)) << value;  // Addi only accepts a signed 16-bit immediate.
+  Addi(rt, rs, value);
+}
+
+void MipsAssembler::LoadImmediate(Register rt, int32_t value) {
+  CHECK(IsInt(16, value)) << value;  // Wider constants would need a Lui/Ori pair.
+  Addi(rt, ZERO, value);
+}
+
+void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
+ size_t size) {
+ MipsManagedRegister dst = m_dst.AsMips();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsFRegister()) {
+ LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
+
+void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
+ int32_t offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ Lb(reg, base, offset);
+ break;
+ case kLoadUnsignedByte:
+ Lbu(reg, base, offset);
+ break;
+ case kLoadSignedHalfword:
+ Lh(reg, base, offset);
+ break;
+ case kLoadUnsignedHalfword:
+ Lhu(reg, base, offset);
+ break;
+ case kLoadWord:
+ Lw(reg, base, offset);
+ break;
+ case kLoadWordPair:
+ LOG(FATAL) << "UNREACHABLE";
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
+ Lwc1(reg, base, offset);
+}
+
+void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
+ Ldc1(reg, base, offset);
+}
+
+void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ Sw(reg, base, offset);
+ break;
+ case kStoreWordPair:
+ LOG(FATAL) << "UNREACHABLE";
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) {
+ Swc1(reg, base, offset);
+}
+
+void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) {
+ Sdc1(reg, base, offset);
+}
+
+void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
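+  // Resulting layout, offsets relative to the adjusted SP (word = 4 bytes):
+  //   frame_size - 4         RA
+  //   below RA, descending   callee-save registers (last register stored highest)
+  //   0                      Method*
+  //   frame_size + 4 + 4*i   entry spill i, written above the frame into the
+  //                          caller's argument area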
+
+ // Increase frame to required size.
+ IncreaseFrameSize(frame_size);
+
+ // Push callee saves and return address
+ int stack_offset = frame_size - kPointerSize;
+ StoreToOffset(kStoreWord, RA, SP, stack_offset);
+ for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
+ stack_offset -= kPointerSize;
+ Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
+ StoreToOffset(kStoreWord, reg, SP, stack_offset);
+ }
+
+ // Write out Method*.
+ StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0);
+
+ // Write out entry spills.
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Register reg = entry_spills.at(i).AsMips().AsCoreRegister();
+ StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize));
+ }
+}
+
+void MipsAssembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+
+ // Pop callee saves and return address
+ int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize;
+ for (size_t i = 0; i < callee_save_regs.size(); ++i) {
+ Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
+ LoadFromOffset(kLoadWord, reg, SP, stack_offset);
+ stack_offset += kPointerSize;
+ }
+ LoadFromOffset(kLoadWord, RA, SP, stack_offset);
+
+ // Decrease frame to required size.
+ DecreaseFrameSize(frame_size);
+
+ // Then jump to the return address.
+ Jr(RA);
+}
+
+void MipsAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, SP, -adjust);
+}
+
+void MipsAssembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, SP, adjust);
+}
+
+void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ MipsManagedRegister src = msrc.AsMips();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+ SP, dest.Int32Value() + 4);
+ } else if (src.IsFRegister()) {
+ StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister());
+ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ MipsManagedRegister src = msrc.AsMips();
+ CHECK(src.IsCoreRegister());
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ MipsManagedRegister src = msrc.AsMips();
+ CHECK(src.IsCoreRegister());
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
+}
+
+void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ S1, thr_offs.Int32Value());
+}
+
+void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
+}
+
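+// Store src at SP+dest and copy the word at SP+in_off to SP+dest+4, so the
+// written pair spans the dest slot and the slot above it.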
+void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+ FrameOffset in_off, ManagedRegister mscratch) {
+ MipsManagedRegister src = msrc.AsMips();
+ MipsManagedRegister scratch = mscratch.AsMips();
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ return EmitLoad(mdest, SP, src.Int32Value(), size);
+}
+
+void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+ return EmitLoad(mdest, S1, src.Int32Value(), size);
+}
+
+void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ MipsManagedRegister dest = mdest.AsMips();
+ CHECK(dest.IsCoreRegister());
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs) {
+ MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+ base.AsMips().AsCoreRegister(), offs.Int32Value());
+}
+
+void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ MipsManagedRegister dest = mdest.AsMips();
+  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister()) << dest;
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+ base.AsMips().AsCoreRegister(), offs.Int32Value());
+}
+
+void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest,
+ ThreadOffset offs) {
+ MipsManagedRegister dest = mdest.AsMips();
+ CHECK(dest.IsCoreRegister());
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
+}
+
+void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips";
+}
+
+void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
+}
+
+void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) {
+ MipsManagedRegister dest = mdest.AsMips();
+ MipsManagedRegister src = msrc.AsMips();
+ if (!dest.Equals(src)) {
+ if (dest.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ Move(dest.AsCoreRegister(), src.AsCoreRegister());
+ } else if (dest.IsFRegister()) {
+ CHECK(src.IsFRegister()) << src;
+ MovS(dest.AsFRegister(), src.AsFRegister());
+ } else if (dest.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ MovD(dest.AsDRegister(), src.AsDRegister());
+ } else {
+ CHECK(dest.IsRegisterPair()) << dest;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second
+ if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
+ Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
+ Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
+ } else {
+ Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
+ Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
+ }
+ }
+ }
+}
+
+void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset thr_offs,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ S1, thr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+}
+
+void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ S1, thr_offs.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ }
+}
+
+void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsMips().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsMips().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+ Register scratch = mscratch.AsMips().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ movl(scratch, Address(ESP, src_base));
+ movl(scratch, Address(scratch, src_offset));
+ movl(Address(ESP, dest), scratch);
+#endif
+}
+
+void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsMips().AsCoreRegister();
+ LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+ Register scratch = mscratch.AsMips().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ movl(scratch, Address(ESP, src));
+ pushl(Address(scratch, src_offset));
+ popl(Address(scratch, dest_offset));
+#endif
+}
+
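+// A real implementation would presumably emit the MIPS sync instruction; the
+// disabled block below is the x86 (mfence) version this file was ported from.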
+void MipsAssembler::MemoryBarrier(ManagedRegister) {
+ UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED";
+#if 0
+#if ANDROID_SMP != 0
+ mfence();
+#endif
+#endif
+}
+
+void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+ FrameOffset sirt_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ MipsManagedRegister out_reg = mout_reg.AsMips();
+ MipsManagedRegister in_reg = min_reg.AsMips();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ Label null_arg;
+ // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
+ // the address in the SIRT holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+ SP, sirt_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0);
+ }
+ EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ Bind(&null_arg, false);
+ } else {
+ AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ }
+}
+
+void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
+ FrameOffset sirt_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ Label null_arg;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+ sirt_offset.Int32Value());
+    // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
+ // the address in the SIRT holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ Bind(&null_arg, false);
+ } else {
+ AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ }
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+// Given a SIRT entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ MipsManagedRegister out_reg = mout_reg.AsMips();
+ MipsManagedRegister in_reg = min_reg.AsMips();
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ CHECK(in_reg.IsCoreRegister()) << in_reg;
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0);
+ }
+ EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+ in_reg.AsCoreRegister(), 0);
+ Bind(&null_arg, false);
+}
+
+void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
+ MipsManagedRegister base = mbase.AsMips();
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ base.AsCoreRegister(), offset.Int32Value());
+ Jalr(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, base.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(), offset.Int32Value());
+ Jalr(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) {
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
+#if 0
+ fs()->call(Address::Absolute(offset));
+#endif
+}
+
+void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
+ Move(tr.AsMips().AsCoreRegister(), S1);
+}
+
+void MipsAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister /*mscratch*/) {
+ StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
+}
+
+void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ MipsManagedRegister scratch = mscratch.AsMips();
+ MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ S1, Thread::ExceptionOffset().Int32Value());
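+  // Branch to the slow path if the loaded exception_ is non-null (bne against
+  // ZERO, since equal == false).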
+ EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false);
+}
+
+void MipsExceptionSlowPath::Emit(Assembler* sasm) {
+ MipsAssembler* sp_asm = down_cast<MipsAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_, false);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception object as argument
+ // Don't care about preserving A0 as this call won't return
+ __ Move(A0, scratch_.AsCoreRegister());
+ // Set up call to Thread::Current()->pDeliverException
+ __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+ __ Jr(T9);
+ // Call never returns
+ __ Break();
+#undef __
+}
+
+} // namespace mips
+} // namespace art
diff --git a/runtime/oat/utils/mips/assembler_mips.h b/runtime/oat/utils/mips/assembler_mips.h
new file mode 100644
index 0000000..02759e4
--- /dev/null
+++ b/runtime/oat/utils/mips/assembler_mips.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#define ART_SRC_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "constants_mips.h"
+#include "globals.h"
+#include "managed_register_mips.h"
+#include "oat/utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace mips {
+#if 0
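+// The Operand and Address classes below are x86 ModRM/SIB machinery (note
+// ESP/EBP and scale factors), evidently carried over from the x86 assembler
+// for reference; they are compiled out.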
+class Operand {
+ public:
+ uint8_t mod() const {
+ return (encoding_at(0) >> 6) & 3;
+ }
+
+ Register rm() const {
+ return static_cast<Register>(encoding_at(0) & 7);
+ }
+
+ ScaleFactor scale() const {
+ return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+ }
+
+ Register index() const {
+ return static_cast<Register>((encoding_at(1) >> 3) & 7);
+ }
+
+ Register base() const {
+ return static_cast<Register>(encoding_at(1) & 7);
+ }
+
+ int8_t disp8() const {
+ CHECK_GE(length_, 2);
+ return static_cast<int8_t>(encoding_[length_ - 1]);
+ }
+
+ int32_t disp32() const {
+ CHECK_GE(length_, 5);
+ int32_t value;
+ memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+ return value;
+ }
+
+ bool IsRegister(Register reg) const {
+ return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only.
+ && ((encoding_[0] & 0x07) == reg); // Register codes match.
+ }
+
+ protected:
+ // Operand can be sub classed (e.g: Address).
+ Operand() : length_(0) { }
+
+ void SetModRM(int mod, Register rm) {
+ CHECK_EQ(mod & ~3, 0);
+ encoding_[0] = (mod << 6) | rm;
+ length_ = 1;
+ }
+
+ void SetSIB(ScaleFactor scale, Register index, Register base) {
+ CHECK_EQ(length_, 1);
+ CHECK_EQ(scale & ~3, 0);
+ encoding_[1] = (scale << 6) | (index << 3) | base;
+ length_ = 2;
+ }
+
+ void SetDisp8(int8_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ encoding_[length_++] = static_cast<uint8_t>(disp);
+ }
+
+ void SetDisp32(int32_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ int disp_size = sizeof(disp);
+ memmove(&encoding_[length_], &disp, disp_size);
+ length_ += disp_size;
+ }
+
+ private:
+ byte length_;
+ byte encoding_[6];
+ byte padding_;
+
+ explicit Operand(Register reg) { SetModRM(3, reg); }
+
+ // Get the operand encoding byte at the given index.
+ uint8_t encoding_at(int index) const {
+ CHECK_GE(index, 0);
+ CHECK_LT(index, length_);
+ return encoding_[index];
+ }
+
+ friend class MipsAssembler;
+
+ // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+ DISALLOW_COPY_AND_ASSIGN(Operand);
+#endif
+};
+
+
+class Address : public Operand {
+ public:
+ Address(Register base, int32_t disp) {
+ Init(base, disp);
+ }
+
+ Address(Register base, Offset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ Address(Register base, FrameOffset disp) {
+ CHECK_EQ(base, ESP);
+ Init(ESP, disp.Int32Value());
+ }
+
+ Address(Register base, MemberOffset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ void Init(Register base, int32_t disp) {
+ if (disp == 0 && base != EBP) {
+ SetModRM(0, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetDisp32(disp);
+ }
+ }
+
+
+ Address(Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale, index, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, ESP); // Illegal addressing mode.
+ if (disp == 0 && base != EBP) {
+ SetModRM(0, ESP);
+ SetSIB(scale, index, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, ESP);
+ SetSIB(scale, index, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, ESP);
+ SetSIB(scale, index, base);
+ SetDisp32(disp);
+ }
+ }
+
+ static Address Absolute(uword addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
+ // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+ DISALLOW_COPY_AND_ASSIGN(Address);
+#endif
+};
+
+#endif
+
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadWordPair,
+ kLoadSWord,
+ kLoadDWord
+};
+
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreWordPair,
+ kStoreSWord,
+ kStoreDWord
+};
+
+class MipsAssembler : public Assembler {
+ public:
+ MipsAssembler() {}
+ virtual ~MipsAssembler() {}
+
+ // Emit Machine Instructions.
+ void Add(Register rd, Register rs, Register rt);
+ void Addu(Register rd, Register rs, Register rt);
+ void Addi(Register rt, Register rs, uint16_t imm16);
+ void Addiu(Register rt, Register rs, uint16_t imm16);
+ void Sub(Register rd, Register rs, Register rt);
+ void Subu(Register rd, Register rs, Register rt);
+ void Mult(Register rs, Register rt);
+ void Multu(Register rs, Register rt);
+ void Div(Register rs, Register rt);
+ void Divu(Register rs, Register rt);
+
+ void And(Register rd, Register rs, Register rt);
+ void Andi(Register rt, Register rs, uint16_t imm16);
+ void Or(Register rd, Register rs, Register rt);
+ void Ori(Register rt, Register rs, uint16_t imm16);
+ void Xor(Register rd, Register rs, Register rt);
+ void Xori(Register rt, Register rs, uint16_t imm16);
+ void Nor(Register rd, Register rs, Register rt);
+
+ void Sll(Register rd, Register rs, int shamt);
+ void Srl(Register rd, Register rs, int shamt);
+ void Sra(Register rd, Register rs, int shamt);
+ void Sllv(Register rd, Register rs, Register rt);
+ void Srlv(Register rd, Register rs, Register rt);
+ void Srav(Register rd, Register rs, Register rt);
+
+ void Lb(Register rt, Register rs, uint16_t imm16);
+ void Lh(Register rt, Register rs, uint16_t imm16);
+ void Lw(Register rt, Register rs, uint16_t imm16);
+ void Lbu(Register rt, Register rs, uint16_t imm16);
+ void Lhu(Register rt, Register rs, uint16_t imm16);
+ void Lui(Register rt, uint16_t imm16);
+ void Mfhi(Register rd);
+ void Mflo(Register rd);
+
+ void Sb(Register rt, Register rs, uint16_t imm16);
+ void Sh(Register rt, Register rs, uint16_t imm16);
+ void Sw(Register rt, Register rs, uint16_t imm16);
+
+ void Slt(Register rd, Register rs, Register rt);
+ void Sltu(Register rd, Register rs, Register rt);
+ void Slti(Register rt, Register rs, uint16_t imm16);
+ void Sltiu(Register rt, Register rs, uint16_t imm16);
+
+ void Beq(Register rt, Register rs, uint16_t imm16);
+ void Bne(Register rt, Register rs, uint16_t imm16);
+ void J(uint32_t address);
+ void Jal(uint32_t address);
+ void Jr(Register rs);
+ void Jalr(Register rs);
+
+ void AddS(FRegister fd, FRegister fs, FRegister ft);
+ void SubS(FRegister fd, FRegister fs, FRegister ft);
+ void MulS(FRegister fd, FRegister fs, FRegister ft);
+ void DivS(FRegister fd, FRegister fs, FRegister ft);
+ void AddD(DRegister fd, DRegister fs, DRegister ft);
+ void SubD(DRegister fd, DRegister fs, DRegister ft);
+ void MulD(DRegister fd, DRegister fs, DRegister ft);
+ void DivD(DRegister fd, DRegister fs, DRegister ft);
+ void MovS(FRegister fd, FRegister fs);
+ void MovD(DRegister fd, DRegister fs);
+
+ void Mfc1(Register rt, FRegister fs);
+ void Mtc1(FRegister ft, Register rs);
+ void Lwc1(FRegister ft, Register rs, uint16_t imm16);
+ void Ldc1(DRegister ft, Register rs, uint16_t imm16);
+ void Swc1(FRegister ft, Register rs, uint16_t imm16);
+ void Sdc1(DRegister ft, Register rs, uint16_t imm16);
+
+ void Break();
+ void Nop();
+ void Move(Register rt, Register rs);
+ void Clear(Register rt);
+ void Not(Register rt, Register rs);
+ void Mul(Register rd, Register rs, Register rt);
+ void Div(Register rd, Register rs, Register rt);
+ void Rem(Register rd, Register rs, Register rt);
+
+ void AddConstant(Register rt, Register rs, int32_t value);
+ void LoadImmediate(Register rt, int32_t value);
+
+ void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
+ void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
+ void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
+ void LoadDFromOffset(DRegister reg, Register base, int32_t offset);
+ void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
+ void StoreFToOffset(FRegister reg, Register base, int32_t offset);
+ void StoreDToOffset(DRegister reg, Register base, int32_t offset);
+
+#if 0
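+  // x86 leftovers (lock prefix, mfence, XmmRegister helpers), compiled out.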
+ MipsAssembler* lock();
+
+ void mfence();
+
+ MipsAssembler* fs();
+
+ //
+ // Macros for High-level operations.
+ //
+
+ void AddImmediate(Register reg, const Immediate& imm);
+
+ void LoadDoubleConstant(XmmRegister dst, double value);
+
+ void DoubleNegate(XmmRegister d);
+ void FloatNegate(XmmRegister f);
+
+ void DoubleAbs(XmmRegister reg);
+
+ void LockCmpxchgl(const Address& address, Register reg) {
+ lock()->cmpxchgl(address, reg);
+ }
+
+ //
+ // Misc. functionality
+ //
+ int PreferredLoopAlignment() { return 16; }
+ void Align(int alignment, int offset);
+
+ // Debugging and bringup support.
+ void Stop(const char* message);
+#endif
+
+ // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
+ void Emit(int32_t value);
+ void EmitBranch(Register rt, Register rs, Label* label, bool equal);
+ void EmitJump(Label* label, bool link);
+ void Bind(Label* label, bool is_jump);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills);
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs);
+
+ virtual void IncreaseFrameSize(size_t adjust);
+ virtual void DecreaseFrameSize(size_t adjust);
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister msrc, size_t size);
+ virtual void StoreRef(FrameOffset dest, ManagedRegister msrc);
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister msrc);
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister mscratch);
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister mscratch);
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch);
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+ FrameOffset in_off, ManagedRegister mscratch);
+
+ // Load routines
+ virtual void Load(ManagedRegister mdest, FrameOffset src, size_t size);
+
+ virtual void Load(ManagedRegister mdest, ThreadOffset src, size_t size);
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ virtual void LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs);
+
+ virtual void LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs);
+
+ virtual void LoadRawPtrFromThread(ManagedRegister mdest,
+ ThreadOffset offs);
+
+ // Copying routines
+ virtual void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size);
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister mscratch);
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister mscratch);
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister mscratch, size_t size);
+
+ virtual void MemoryBarrier(ManagedRegister);
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr);
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister mscratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the SIRT entry to see if the value is
+ // NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister mscratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load the referenced object into dst.
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+ virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister mscratch);
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister mscratch);
+ virtual void Call(ThreadOffset offset, ManagedRegister mscratch);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust);
+
+ private:
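+  // Raw encoders for the MIPS instruction formats:
+  //   R-type: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6)
+  //   I-type: opcode(6) rs(5) rt(5) imm(16)
+  //   J-type: opcode(6) address(26)
+  // EmitFR/EmitFI are the coprocessor-1 counterparts, with a fmt field in
+  // place of rs.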
+ void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
+ void EmitI(int opcode, Register rs, Register rt, uint16_t imm);
+ void EmitJ(int opcode, int address);
+ void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
+ void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
+
+ int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
+ int DecodeBranchOffset(int32_t inst, bool is_jump);
+
+ DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
+};
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class MipsExceptionSlowPath : public SlowPath {
+ public:
+ explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm);
+ private:
+ const MipsManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
diff --git a/runtime/oat/utils/mips/managed_register_mips.cc b/runtime/oat/utils/mips/managed_register_mips.cc
new file mode 100644
index 0000000..195dafb
--- /dev/null
+++ b/runtime/oat/utils/mips/managed_register_mips.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_mips.h"
+
+#include "globals.h"
+
+namespace art {
+namespace mips {
+
+// These core registers are never available for allocation.
+static const Register kReservedCoreRegistersArray[] = { S0, S1 };
+
+// We need all registers for caching.
+static const int kNumberOfAvailableCoreRegisters = (S7 - T0) + 1;
+static const int kNumberOfAvailableFRegisters = kNumberOfFRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+ kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ CHECK(IsValidManagedRegister());
+ CHECK(other.IsValidManagedRegister());
+ if (Equals(other)) return true;
+ if (IsRegisterPair()) {
+ Register low = AsRegisterPairLow();
+ Register high = AsRegisterPairHigh();
+ return MipsManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+ MipsManagedRegister::FromCoreRegister(high).Overlaps(other);
+ }
+ if (IsOverlappingDRegister()) {
+ if (other.IsDRegister()) return Equals(other);
+ if (other.IsFRegister()) {
+ FRegister low = AsOverlappingDRegisterLow();
+ FRegister high = AsOverlappingDRegisterHigh();
+ FRegister other_freg = other.AsFRegister();
+ return (low == other_freg) || (high == other_freg);
+ }
+ return false;
+ }
+ if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+ return other.Overlaps(*this);
+ }
+ return false;
+}
+
+
+int MipsManagedRegister::AllocIdLow() const {
+ CHECK(IsOverlappingDRegister() || IsRegisterPair());
+ const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+ int low;
+ if (r < kNumberOfOverlappingDRegIds) {
+ CHECK(IsOverlappingDRegister());
+ low = (r * 2) + kNumberOfCoreRegIds; // Return an FRegister.
+ } else {
+ CHECK(IsRegisterPair());
+ low = (r - kNumberOfDRegIds) * 2 + 2; // Return a Register.
+ if (low >= 24) {
+      // We got a pair above S6_S7, so this must be the A1_A2 Dalvik
+      // argument-passing special case; its low register is A1 (5).
+ low = 5;
+ }
+ }
+ return low;
+}
+
+
+int MipsManagedRegister::AllocIdHigh() const {
+ return AllocIdLow() + 1;
+}
+
+
+void MipsManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsCoreRegister()) {
+ os << "Core: " << static_cast<int>(AsCoreRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+ } else if (IsFRegister()) {
+ os << "FRegister: " << static_cast<int>(AsFRegister());
+ } else if (IsDRegister()) {
+ os << "DRegister: " << static_cast<int>(AsDRegister());
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+ os << MipsManagedRegister::FromRegisterPair(reg);
+ return os;
+}
+
+} // namespace mips
+} // namespace art
diff --git a/runtime/oat/utils/mips/managed_register_mips.h b/runtime/oat/utils/mips/managed_register_mips.h
new file mode 100644
index 0000000..aaaabfc
--- /dev/null
+++ b/runtime/oat/utils/mips/managed_register_mips.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+#define ART_SRC_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+
+#include "constants_mips.h"
+#include "oat/utils/managed_register.h"
+
+namespace art {
+namespace mips {
+
+// Values for register pairs.
+enum RegisterPair {
+ V0_V1 = 0,
+ A0_A1 = 1,
+ A2_A3 = 2,
+ T0_T1 = 3,
+ T2_T3 = 4,
+ T4_T5 = 5,
+ T6_T7 = 6,
+ S0_S1 = 7,
+ S2_S3 = 8,
+ S4_S5 = 9,
+ S6_S7 = 10,
+ A1_A2 = 11, // Dalvik style passing
+ kNumberOfRegisterPairs = 12,
+ kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfFRegIds = kNumberOfFRegisters;
+const int kNumberOfFAllocIds = kNumberOfFRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegisters;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfFRegIds +
+ kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+ kNumberOfCoreAllocIds + kNumberOfFAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+// [0..R[ core registers (enum Register)
+// [R..F[ single precision FP registers (enum FRegister)
+// [F..D[ double precision FP registers (enum DRegister)
+// [D..P[ core register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCoreRegIds
+// F = R + kNumberOfFRegIds
+// D = F + kNumberOfDRegIds
+// P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ core registers (enum Register)
+// [R..F[ single precision FP registers (enum FRegister)
+// where
+// R = kNumberOfCoreRegIds
+// F = R + kNumberOfFRegIds
+
+
+// An instance of class 'ManagedRegister' represents a single core register (enum
+// Register), a single precision FP register (enum FRegister), a double precision
+// FP register (enum DRegister), or a pair of core registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class MipsManagedRegister : public ManagedRegister {
+ public:
+ Register AsCoreRegister() const {
+ CHECK(IsCoreRegister());
+ return static_cast<Register>(id_);
+ }
+
+ FRegister AsFRegister() const {
+ CHECK(IsFRegister());
+ return static_cast<FRegister>(id_ - kNumberOfCoreRegIds);
+ }
+
+ DRegister AsDRegister() const {
+ CHECK(IsDRegister());
+ return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfFRegIds);
+ }
+
+ FRegister AsOverlappingDRegisterLow() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<FRegister>(d_reg * 2);
+ }
+
+ FRegister AsOverlappingDRegisterHigh() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<FRegister>(d_reg * 2 + 1);
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows the use of AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCoreRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows the use of AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCoreRegister();
+ }
+
+ bool IsCoreRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+ }
+
+ bool IsFRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCoreRegIds;
+ return (0 <= test) && (test < kNumberOfFRegIds);
+ }
+
+ bool IsDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+ return (0 <= test) && (test < kNumberOfDRegIds);
+ }
+
+ // Returns true if this DRegister overlaps FRegisters.
+ bool IsOverlappingDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
+ return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test =
+ id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ void Print(std::ostream& os) const;
+
+  // Returns true if the two managed-registers ('this' and 'other') overlap.
+  // Either managed-register may be the NoRegister; if either is the
+  // NoRegister, false is returned.
+ bool Overlaps(const MipsManagedRegister& other) const;
+
+ static MipsManagedRegister FromCoreRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static MipsManagedRegister FromFRegister(FRegister r) {
+ CHECK_NE(r, kNoFRegister);
+ return FromRegId(r + kNumberOfCoreRegIds);
+ }
+
+ static MipsManagedRegister FromDRegister(DRegister r) {
+ CHECK_NE(r, kNoDRegister);
+ return FromRegId(r + kNumberOfCoreRegIds + kNumberOfFRegIds);
+ }
+
+ static MipsManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds));
+ }
+
+ private:
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() && !IsOverlappingDRegister() && !IsRegisterPair());
+ CHECK_LT(id_, kNumberOfAllocIds);
+ return id_;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ friend class ManagedRegister;
+
+ explicit MipsManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+ static MipsManagedRegister FromRegId(int reg_id) {
+ MipsManagedRegister reg(reg_id);
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg);
+
+} // namespace mips
+
+inline mips::MipsManagedRegister ManagedRegister::AsMips() const {
+ mips::MipsManagedRegister reg(id_);
+ CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+ return reg;
+}
+
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
diff --git a/runtime/oat/utils/x86/assembler_x86.cc b/runtime/oat/utils/x86/assembler_x86.cc
new file mode 100644
index 0000000..fd8f152
--- /dev/null
+++ b/runtime/oat/utils/x86/assembler_x86.cc
@@ -0,0 +1,1859 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "base/casts.h"
+#include "memory_region.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+class DirectCallRelocation : public AssemblerFixup {
+ public:
+ void Process(const MemoryRegion& region, int position) {
+ // Direct calls are relative to the following instruction on x86.
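+    // At fixup time the four bytes at 'position' hold the absolute target;
+    // rewrite them as rel32 = target - address_of_next_instruction.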
+ int32_t pointer = region.Load<int32_t>(position);
+ int32_t start = reinterpret_cast<int32_t>(region.start());
+ int32_t delta = start + position + sizeof(int32_t);
+ region.Store<int32_t>(position, pointer - delta);
+ }
+};
+
+static const char* kRegisterNames[] = {
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= EAX && rhs <= EDI) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
+ return os << "XMM" << static_cast<int>(reg);
+}
+
+std::ostream& operator<<(std::ostream& os, const X87Register& reg) {
+ return os << "ST" << static_cast<int>(reg);
+}
+
+void X86Assembler::call(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitRegisterOperand(2, reg);
+}
+
+
+void X86Assembler::call(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(2, address);
+}
+
+
+void X86Assembler::call(Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xE8);
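+  // 0xE8 is followed by a 4-byte rel32, so the whole instruction is 5 bytes;
+  // EmitLabel needs the length to compute the PC-relative displacement.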
+ static const int kSize = 5;
+ EmitLabel(label, kSize);
+}
+
+
+void X86Assembler::pushl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x50 + reg);
+}
+
+
+void X86Assembler::pushl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(6, address);
+}
+
+
+void X86Assembler::pushl(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (imm.is_int8()) {
+ EmitUint8(0x6A);
+ EmitUint8(imm.value() & 0xFF);
+ } else {
+ EmitUint8(0x68);
+ EmitImmediate(imm);
+ }
+}
+
+
+void X86Assembler::popl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x58 + reg);
+}
+
+
+void X86Assembler::popl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x8F);
+ EmitOperand(0, address);
+}
+
+
+void X86Assembler::movl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xB8 + dst);
+ EmitImmediate(imm);
+}
+
+
+void X86Assembler::movl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x89);
+ EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movl(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x8B);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movl(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x89);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movl(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+void X86Assembler::movl(const Address& dst, Label* lbl) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitLabel(lbl, dst.length_ + 5);
+}
+
+void X86Assembler::movzxb(Register dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB6);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxb(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB6);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBE);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBE);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) {
+ LOG(FATAL) << "Use movzxb or movsxb instead.";
+}
+
+
+void X86Assembler::movb(const Address& dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x88);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movb(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC6);
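+  // EAX here supplies the /0 opcode-extension bits of 0xC6; it is not a
+  // register operand.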
+ EmitOperand(EAX, dst);
+ CHECK(imm.is_int8());
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86Assembler::movzxw(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB7);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxw(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB7);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBF);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBF);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) {
+ LOG(FATAL) << "Use movzxw or movsxw instead.";
+}
+
+
+void X86Assembler::movw(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOperandSizeOverride();
+ EmitUint8(0x89);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::leal(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x8D);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::cmovl(Condition condition, Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x40 + condition);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::setb(Condition condition, Register dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x90 + condition);
+ EmitOperand(0, Operand(dst));
+}
+
+
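+// Scalar SSE encodings below: the 0xF3 prefix selects the single-precision
+// (ss) forms and 0xF2 the double-precision (sd) forms of the 0x0F-escaped
+// opcodes.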
+void X86Assembler::movss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movss(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
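+  // Register-to-register moves use the store form (0x11) with the operands
+  // swapped in the ModRM byte; the load form (0x10) would encode the same move.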
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movd(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x6E);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::movd(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x7E);
+ EmitOperand(src, Operand(dst));
+}
+
+
+void X86Assembler::addss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::flds(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstps(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(3, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitOperand(dst, src);
+}
+
+
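+// Scalar int/float conversions. The cvtt* forms (0F 2C) truncate toward zero,
+// matching the semantics of a C/Java cast; the cvt* forms (0F 2D) round
+// according to the current MXCSR rounding mode.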
+void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtss2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttss2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttsd2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0xE6);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::comiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x51);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x51);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andpd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
+ EmitOperand(dst, src);
+}
+
+
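+// x87 FPU instructions, used where SSE2 has no direct equivalent: 64-bit
+// integer conversion via fildl/fistpl, control-word manipulation via
+// fnstcw/fldcw, and the transcendentals fsin/fcos/fptan.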
+void X86Assembler::fldl(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstpl(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fnstcw(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fldcw(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(5, src);
+}
+
+
+void X86Assembler::fistpl(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDF);
+ EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fistps(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDB);
+ EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fildl(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDF);
+ EmitOperand(5, src);
+}
+
+
+void X86Assembler::fincstp() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xF7);
+}
+
+
+void X86Assembler::ffree(const Immediate& index) {
+ CHECK_LT(index.value(), 7);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitUint8(0xC0 + index.value());
+}
+
+
+void X86Assembler::fsin() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xFE);
+}
+
+
+void X86Assembler::fcos() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xFF);
+}
+
+
+void X86Assembler::fptan() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xF2);
+}
+
+
+void X86Assembler::xchgl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x87);
+ EmitRegisterOperand(dst, src);
+}
+
+void X86Assembler::xchgl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x87);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(7, Operand(reg), imm);
+}
+
+
+void X86Assembler::cmpl(Register reg0, Register reg1) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x3B);
+ EmitOperand(reg0, Operand(reg1));
+}
+
+
+void X86Assembler::cmpl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x3B);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x03);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x03);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x39);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(7, address, imm);
+}
+
+
+void X86Assembler::testl(Register reg1, Register reg2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x85);
+ EmitRegisterOperand(reg1, reg2);
+}
+
+
+void X86Assembler::testl(Register reg, const Immediate& immediate) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ // For registers that have a byte variant (EAX, EBX, ECX, and EDX)
+ // we only test the byte register to keep the encoding short.
+ if (immediate.is_uint8() && reg < 4) {
+ // Use zero-extended 8-bit immediate.
+ if (reg == EAX) {
+ EmitUint8(0xA8);
+ } else {
+ EmitUint8(0xF6);
+ EmitUint8(0xC0 + reg);
+ }
+ EmitUint8(immediate.value() & 0xFF);
+ } else if (reg == EAX) {
+ // Use short form if the destination is EAX.
+ EmitUint8(0xA9);
+ EmitImmediate(immediate);
+ } else {
+ EmitUint8(0xF7);
+ EmitOperand(0, Operand(reg));
+ EmitImmediate(immediate);
+ }
+}
+
+
+void X86Assembler::andl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x23);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::andl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(4, Operand(dst), imm);
+}
+
+
+void X86Assembler::orl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::orl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(1, Operand(dst), imm);
+}
+
+
+void X86Assembler::xorl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x33);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::addl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86Assembler::addl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x01);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(0, address, imm);
+}
+
+
+void X86Assembler::adcl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(2, Operand(reg), imm);
+}
+
+
+void X86Assembler::adcl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x13);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::adcl(Register dst, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x13);
+ EmitOperand(dst, address);
+}
+
+
+void X86Assembler::subl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x2B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::subl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(5, Operand(reg), imm);
+}
+
+
+void X86Assembler::subl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x2B);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cdq() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x99);
+}
+
+
+void X86Assembler::idivl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitUint8(0xF8 | reg);
+}
+
+
+void X86Assembler::imull(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAF);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::imull(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x69);
+ EmitOperand(reg, Operand(reg));
+ EmitImmediate(imm);
+}
+
+
+void X86Assembler::imull(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAF);
+ EmitOperand(reg, address);
+}
+
+
+void X86Assembler::imull(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(5, Operand(reg));
+}
+
+
+void X86Assembler::imull(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(5, address);
+}
+
+
+void X86Assembler::mull(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(4, Operand(reg));
+}
+
+
+void X86Assembler::mull(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(4, address);
+}
+
+
+void X86Assembler::sbbl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x1B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::sbbl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(3, Operand(reg), imm);
+}
+
+
+void X86Assembler::sbbl(Register dst, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x1B);
+ EmitOperand(dst, address);
+}
+
+
+void X86Assembler::incl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x40 + reg);
+}
+
+
+void X86Assembler::incl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(0, address);
+}
+
+
+void X86Assembler::decl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x48 + reg);
+}
+
+
+void X86Assembler::decl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(1, address);
+}
+
+
+void X86Assembler::shll(Register reg, const Immediate& imm) {
+ EmitGenericShift(4, reg, imm);
+}
+
+
+void X86Assembler::shll(Register operand, Register shifter) {
+ EmitGenericShift(4, operand, shifter);
+}
+
+
+void X86Assembler::shrl(Register reg, const Immediate& imm) {
+ EmitGenericShift(5, reg, imm);
+}
+
+
+void X86Assembler::shrl(Register operand, Register shifter) {
+ EmitGenericShift(5, operand, shifter);
+}
+
+
+void X86Assembler::sarl(Register reg, const Immediate& imm) {
+ EmitGenericShift(7, reg, imm);
+}
+
+
+void X86Assembler::sarl(Register operand, Register shifter) {
+ EmitGenericShift(7, operand, shifter);
+}
+
+
+void X86Assembler::shld(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xA5);
+ EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::negl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(3, Operand(reg));
+}
+
+
+void X86Assembler::notl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitUint8(0xD0 | reg);
+}
+
+
+void X86Assembler::enter(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC8);
+ CHECK(imm.is_uint16());
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8((imm.value() >> 8) & 0xFF);
+ EmitUint8(0x00);
+}
+
+
+void X86Assembler::leave() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC9);
+}
+
+
+void X86Assembler::ret() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC3);
+}
+
+
+void X86Assembler::ret(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC2);
+ CHECK(imm.is_uint16());
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8((imm.value() >> 8) & 0xFF);
+}
+
+
+void X86Assembler::nop() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x90);
+}
+
+
+void X86Assembler::int3() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xCC);
+}
+
+
+void X86Assembler::hlt() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF4);
+}
+
+
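+// Conditional branch. For a bound (backward) label, emit the 2-byte short form
+// (70+cc rel8) when the displacement fits in an int8, else the 6-byte near
+// form (0F 80+cc rel32). Forward references always use the near form so that
+// Bind() can patch in a 32-bit displacement later.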
+void X86Assembler::j(Condition condition, Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (label->IsBound()) {
+ static const int kShortSize = 2;
+ static const int kLongSize = 6;
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ if (IsInt(8, offset - kShortSize)) {
+ EmitUint8(0x70 + condition);
+ EmitUint8((offset - kShortSize) & 0xFF);
+ } else {
+ EmitUint8(0x0F);
+ EmitUint8(0x80 + condition);
+ EmitInt32(offset - kLongSize);
+ }
+ } else {
+ EmitUint8(0x0F);
+ EmitUint8(0x80 + condition);
+ EmitLabelLink(label);
+ }
+}
+
+
+void X86Assembler::jmp(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitRegisterOperand(4, reg);
+}
+
+void X86Assembler::jmp(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(4, address);
+}
+
+void X86Assembler::jmp(Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (label->IsBound()) {
+ static const int kShortSize = 2;
+ static const int kLongSize = 5;
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ if (IsInt(8, offset - kShortSize)) {
+ EmitUint8(0xEB);
+ EmitUint8((offset - kShortSize) & 0xFF);
+ } else {
+ EmitUint8(0xE9);
+ EmitInt32(offset - kLongSize);
+ }
+ } else {
+ EmitUint8(0xE9);
+ EmitLabelLink(label);
+ }
+}
+
+
+X86Assembler* X86Assembler::lock() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF0);
+ return this;
+}
+
+
+void X86Assembler::cmpxchgl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB1);
+ EmitOperand(reg, address);
+}
+
+void X86Assembler::mfence() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAE);
+ EmitUint8(0xF0);
+}
+
+X86Assembler* X86Assembler::fs() {
+ // TODO: fs is a prefix and not an instruction
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x64);
+ return this;
+}
+
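+// Adds imm to reg, preferring the shorter incl/decl encodings for +/-1 and
+// emitting nothing at all when imm is zero.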
+void X86Assembler::AddImmediate(Register reg, const Immediate& imm) {
+ int value = imm.value();
+ if (value > 0) {
+ if (value == 1) {
+ incl(reg);
+ } else if (value != 0) {
+ addl(reg, imm);
+ }
+ } else if (value < 0) {
+ value = -value;
+ if (value == 1) {
+ decl(reg);
+ } else if (value != 0) {
+ subl(reg, Immediate(value));
+ }
+ }
+}
+
+
+void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
+ // TODO: Need to have a code constants table.
+ int64_t constant = bit_cast<int64_t, double>(value);
+ pushl(Immediate(High32Bits(constant)));
+ pushl(Immediate(Low32Bits(constant)));
+ movsd(dst, Address(ESP, 0));
+ addl(ESP, Immediate(2 * kWordSize));
+}
+
+
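+// Negation and absolute value are implemented with bitmasks: xorps/xorpd
+// against the sign bit flips it, andpd against ~sign clears it. The constants
+// are 16-byte aligned because SSE memory operands of these ops must be
+// aligned.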
+void X86Assembler::FloatNegate(XmmRegister f) {
+ static const struct {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant __attribute__((aligned(16))) =
+ { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
+ xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+}
+
+
+void X86Assembler::DoubleNegate(XmmRegister d) {
+ static const struct {
+ uint64_t a;
+ uint64_t b;
+ } double_negate_constant __attribute__((aligned(16))) =
+ {0x8000000000000000LL, 0x8000000000000000LL};
+ xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+}
+
+
+void X86Assembler::DoubleAbs(XmmRegister reg) {
+ static const struct {
+ uint64_t a;
+ uint64_t b;
+ } double_abs_constant __attribute__((aligned(16))) =
+ {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
+ andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+}
+
+
+void X86Assembler::Align(int alignment, int offset) {
+ CHECK(IsPowerOfTwo(alignment));
+  // Emit nop instructions until the real position is aligned.
+ while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) {
+ nop();
+ }
+}
+
+
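+// Binding a label walks its chain of forward references: each linked slot
+// holds the buffer position of the next link, and is patched with the final
+// displacement, measured from the end of the 4-byte slot.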
+void X86Assembler::Bind(Label* label) {
+ int bound = buffer_.Size();
+ CHECK(!label->IsBound()); // Labels can only be bound once.
+ while (label->IsLinked()) {
+ int position = label->LinkPosition();
+ int next = buffer_.Load<int32_t>(position);
+ buffer_.Store<int32_t>(position, bound - (position + 4));
+ label->position_ = next;
+ }
+ label->BindTo(bound);
+}
+
+
+void X86Assembler::Stop(const char* message) {
+  // Emit the message address as an immediate operand in the test eax instruction,
+ // followed by the int3 instruction.
+ // Execution can be resumed with the 'cont' command in gdb.
+ testl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
+ int3();
+}
+
+
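+// Emits an encoded operand: the ModRM byte (with reg_or_opcode folded into
+// bits 5..3), followed by any SIB byte and displacement the Operand carries.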
+void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
+ CHECK_GE(reg_or_opcode, 0);
+ CHECK_LT(reg_or_opcode, 8);
+ const int length = operand.length_;
+ CHECK_GT(length, 0);
+ // Emit the ModRM byte updated with the given reg value.
+ CHECK_EQ(operand.encoding_[0] & 0x38, 0);
+ EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3));
+ // Emit the rest of the encoded operand.
+ for (int i = 1; i < length; i++) {
+ EmitUint8(operand.encoding_[i]);
+ }
+}
+
+
+void X86Assembler::EmitImmediate(const Immediate& imm) {
+ EmitInt32(imm.value());
+}
+
+
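+// Common encoder for the ALU group (add/or/adc/sbb/and/sub/xor/cmp), where
+// reg_or_opcode is the /digit opcode extension. Picks 83 /digit ib for int8
+// immediates, the short accumulator form (05 + digit<<3) when the operand is
+// EAX, or 81 /digit id otherwise.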
+void X86Assembler::EmitComplex(int reg_or_opcode,
+ const Operand& operand,
+ const Immediate& immediate) {
+ CHECK_GE(reg_or_opcode, 0);
+ CHECK_LT(reg_or_opcode, 8);
+ if (immediate.is_int8()) {
+ // Use sign-extended 8-bit immediate.
+ EmitUint8(0x83);
+ EmitOperand(reg_or_opcode, operand);
+ EmitUint8(immediate.value() & 0xFF);
+ } else if (operand.IsRegister(EAX)) {
+ // Use short form if the destination is eax.
+ EmitUint8(0x05 + (reg_or_opcode << 3));
+ EmitImmediate(immediate);
+ } else {
+ EmitUint8(0x81);
+ EmitOperand(reg_or_opcode, operand);
+ EmitImmediate(immediate);
+ }
+}
+
+
+void X86Assembler::EmitLabel(Label* label, int instruction_size) {
+ if (label->IsBound()) {
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ EmitInt32(offset - instruction_size);
+ } else {
+ EmitLabelLink(label);
+ }
+}
+
+
+void X86Assembler::EmitLabelLink(Label* label) {
+ CHECK(!label->IsBound());
+ int position = buffer_.Size();
+ EmitInt32(label->position_);
+ label->LinkTo(position);
+}
+
+
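+// Shift-group encoders: D1 /digit for a count of one, C1 /digit ib for other
+// immediate counts, and D3 /digit for a count in CL (the only register the
+// encoding permits, hence the CHECK below).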
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+ Register reg,
+ const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int8());
+ if (imm.value() == 1) {
+ EmitUint8(0xD1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ } else {
+ EmitUint8(0xC1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ EmitUint8(imm.value() & 0xFF);
+ }
+}
+
+
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+ Register operand,
+ Register shifter) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK_EQ(shifter, ECX);
+ EmitUint8(0xD3);
+ EmitOperand(reg_or_opcode, Operand(operand));
+}
+
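+// Builds a managed frame: pushes the callee-save registers, reserves the rest
+// of the frame, pushes the method*, and writes the entry spills to argument
+// slots above the frame.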
+void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& spill_regs,
+ const std::vector<ManagedRegister>& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+ }
+  // The return address and spills are already on the stack; reserve the rest
+  // of the frame, leaving room for the method*, which is pushed next.
+ addl(ESP, Immediate(-frame_size + (spill_regs.size() * kPointerSize) +
+ kPointerSize /*method*/ + kPointerSize /*return address*/));
+ pushl(method_reg.AsX86().AsCpuRegister());
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ movl(Address(ESP, frame_size + kPointerSize + (i * kPointerSize)),
+ entry_spills.at(i).AsX86().AsCpuRegister());
+ }
+}
+
+void X86Assembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ addl(ESP, Immediate(frame_size - (spill_regs.size() * kPointerSize) - kPointerSize));
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ popl(spill_regs.at(i).AsX86().AsCpuRegister());
+ }
+ ret();
+}
+
+void X86Assembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ addl(ESP, Immediate(-adjust));
+}
+
+void X86Assembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ addl(ESP, Immediate(adjust));
+}
+
+void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister src = msrc.AsX86();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ movl(Address(ESP, offs), src.AsCpuRegister());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ movl(Address(ESP, offs), src.AsRegisterPairLow());
+ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
+ src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ fstps(Address(ESP, offs));
+ } else {
+ fstpl(Address(ESP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ movss(Address(ESP, offs), src.AsXmmRegister());
+ } else {
+ movsd(Address(ESP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister) {
+ movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister) {
+ fs()->movl(Address::Absolute(dest), Immediate(imm));
+}
+
+void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
+ fs()->movl(Address::Absolute(thr_offs), lbl);
+}
+
+void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
+ FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ movl(dest.AsRegisterPairLow(), Address(ESP, src));
+ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ flds(Address(ESP, src));
+ } else {
+ fldl(Address(ESP, src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ movss(dest.AsXmmRegister(), Address(ESP, src));
+ } else {
+ movsd(dest.AsXmmRegister(), Address(ESP, src));
+ }
+ }
+}
+
+void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ fs()->flds(Address::Absolute(src));
+ } else {
+ fs()->fldl(Address::Absolute(src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+ } else {
+ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+ }
+ }
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
+ ThreadOffset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ X86ManagedRegister src = msrc.AsX86();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ movl(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+ subl(ESP, Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ fstps(Address(ESP, 0));
+ movss(dest.AsXmmRegister(), Address(ESP, 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ fstpl(Address(ESP, 0));
+ movsd(dest.AsXmmRegister(), Address(ESP, 0));
+ }
+ addl(ESP, Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ movl(scratch.AsCpuRegister(), Address(ESP, src));
+ movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset thr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+ Store(fr_offs, scratch, 4);
+}
+
+void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 4);
+ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
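+// Stack-to-stack copy through the scratch register. With a single CPU scratch
+// register an 8-byte copy is split into two 4-byte moves; otherwise the sized
+// Load/Store pair handles it directly.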
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*scratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ pushl(Address(ESP, src));
+ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ movl(scratch, Address(ESP, src_base));
+ movl(scratch, Address(scratch, src_offset));
+ movl(Address(ESP, dest), scratch);
+}
+
+void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ movl(scratch, Address(ESP, src));
+ pushl(Address(scratch, src_offset));
+ popl(Address(scratch, dest_offset));
+}
+
+void X86Assembler::MemoryBarrier(ManagedRegister) {
+#if ANDROID_SMP != 0
+ mfence();
+#endif
+}
+
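+// When null_allowed, out_reg is zeroed first and only overwritten with the
+// SIRT slot address if the incoming reference is non-null, so a null argument
+// stays NULL.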
+void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
+ FrameOffset sirt_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(kZero, &null_arg);
+ leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ Bind(&null_arg);
+ } else {
+ leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ }
+}
+
+void X86Assembler::CreateSirtEntry(FrameOffset out_off,
+ FrameOffset sirt_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ j(kZero, &null_arg);
+ leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ Bind(&null_arg);
+ } else {
+ leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ }
+ Store(out_off, scratch, 4);
+}
+
+// Given a SIRT entry, load the associated reference.
+void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(kZero, &null_arg);
+ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ Bind(&null_arg);
+}
+
+void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86ManagedRegister base = mbase.AsX86();
+ CHECK(base.IsCpuRegister());
+ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ movl(scratch, Address(ESP, base));
+ call(Address(scratch, offset));
+}
+
+void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) {
+ fs()->call(Address::Absolute(offset));
+}
+
+void X86Assembler::GetCurrentThread(ManagedRegister tr) {
+ fs()->movl(tr.AsX86().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset()));
+}
+
+void X86Assembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset()));
+ movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
+void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
+ j(kNotEqual, slow->Entry());
+}
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception as argument in EAX
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
+ __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+ // this call should never return
+ __ int3();
+#undef __
+}
+
+} // namespace x86
+} // namespace art
diff --git a/runtime/oat/utils/x86/assembler_x86.h b/runtime/oat/utils/x86/assembler_x86.h
new file mode 100644
index 0000000..dddb9b1
--- /dev/null
+++ b/runtime/oat/utils/x86/assembler_x86.h
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
+#define ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
+
+#include <vector>
+#include "base/macros.h"
+#include "constants_x86.h"
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "oat/utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+class Immediate {
+ public:
+ explicit Immediate(int32_t value) : value_(value) {}
+
+ int32_t value() const { return value_; }
+
+ bool is_int8() const { return IsInt(8, value_); }
+ bool is_uint8() const { return IsUint(8, value_); }
+ bool is_uint16() const { return IsUint(16, value_); }
+
+ private:
+ const int32_t value_;
+
+ // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+ DISALLOW_COPY_AND_ASSIGN(Immediate);
+#endif
+};
+
+
+class Operand {
+ public:
+ uint8_t mod() const {
+ return (encoding_at(0) >> 6) & 3;
+ }
+
+ Register rm() const {
+ return static_cast<Register>(encoding_at(0) & 7);
+ }
+
+ ScaleFactor scale() const {
+ return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+ }
+
+ Register index() const {
+ return static_cast<Register>((encoding_at(1) >> 3) & 7);
+ }
+
+ Register base() const {
+ return static_cast<Register>(encoding_at(1) & 7);
+ }
+
+ int8_t disp8() const {
+ CHECK_GE(length_, 2);
+ return static_cast<int8_t>(encoding_[length_ - 1]);
+ }
+
+ int32_t disp32() const {
+ CHECK_GE(length_, 5);
+ int32_t value;
+ memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+ return value;
+ }
+
+ bool IsRegister(Register reg) const {
+ return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only.
+ && ((encoding_[0] & 0x07) == reg); // Register codes match.
+ }
+
+ protected:
+  // Operand can be subclassed (e.g. Address).
+ Operand() : length_(0) { }
+
+ void SetModRM(int mod, Register rm) {
+ CHECK_EQ(mod & ~3, 0);
+ encoding_[0] = (mod << 6) | rm;
+ length_ = 1;
+ }
+
+ void SetSIB(ScaleFactor scale, Register index, Register base) {
+ CHECK_EQ(length_, 1);
+ CHECK_EQ(scale & ~3, 0);
+ encoding_[1] = (scale << 6) | (index << 3) | base;
+ length_ = 2;
+ }
+
+ void SetDisp8(int8_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ encoding_[length_++] = static_cast<uint8_t>(disp);
+ }
+
+ void SetDisp32(int32_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ int disp_size = sizeof(disp);
+ memmove(&encoding_[length_], &disp, disp_size);
+ length_ += disp_size;
+ }
+
+ private:
+ byte length_;
+ byte encoding_[6];
+ byte padding_;
+
+ explicit Operand(Register reg) { SetModRM(3, reg); }
+
+ // Get the operand encoding byte at the given index.
+ uint8_t encoding_at(int index) const {
+ CHECK_GE(index, 0);
+ CHECK_LT(index, length_);
+ return encoding_[index];
+ }
+
+ friend class X86Assembler;
+
+ // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+ DISALLOW_COPY_AND_ASSIGN(Operand);
+#endif
+};
+
+
+class Address : public Operand {
+ public:
+ Address(Register base, int32_t disp) {
+ Init(base, disp);
+ }
+
+ Address(Register base, Offset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ Address(Register base, FrameOffset disp) {
+ CHECK_EQ(base, ESP);
+ Init(ESP, disp.Int32Value());
+ }
+
+ Address(Register base, MemberOffset disp) {
+ Init(base, disp.Int32Value());
+ }
+
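+  // Picks the shortest ModRM encoding: mod=0 (no displacement) when disp is
+  // zero and the base is not EBP (mod=0 with rm=EBP means absolute disp32),
+  // mod=1 with a disp8 when it fits, otherwise mod=2 with a disp32. ESP as a
+  // base always requires a SIB byte.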
+ void Init(Register base, int32_t disp) {
+ if (disp == 0 && base != EBP) {
+ SetModRM(0, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, base);
+ if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetDisp32(disp);
+ }
+ }
+
+
+ Address(Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale, index, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, ESP); // Illegal addressing mode.
+ if (disp == 0 && base != EBP) {
+ SetModRM(0, ESP);
+ SetSIB(scale, index, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, ESP);
+ SetSIB(scale, index, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, ESP);
+ SetSIB(scale, index, base);
+ SetDisp32(disp);
+ }
+ }
+
+ static Address Absolute(uword addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
+ // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+ DISALLOW_COPY_AND_ASSIGN(Address);
+#endif
+};
+
+
+class X86Assembler : public Assembler {
+ public:
+ X86Assembler() {}
+ virtual ~X86Assembler() {}
+
+ /*
+ * Emit Machine Instructions.
+ */
+ void call(Register reg);
+ void call(const Address& address);
+ void call(Label* label);
+
+ void pushl(Register reg);
+ void pushl(const Address& address);
+ void pushl(const Immediate& imm);
+
+ void popl(Register reg);
+ void popl(const Address& address);
+
+ void movl(Register dst, const Immediate& src);
+ void movl(Register dst, Register src);
+
+ void movl(Register dst, const Address& src);
+ void movl(const Address& dst, Register src);
+ void movl(const Address& dst, const Immediate& imm);
+ void movl(const Address& dst, Label* lbl);
+
+ void movzxb(Register dst, ByteRegister src);
+ void movzxb(Register dst, const Address& src);
+ void movsxb(Register dst, ByteRegister src);
+ void movsxb(Register dst, const Address& src);
+ void movb(Register dst, const Address& src);
+ void movb(const Address& dst, ByteRegister src);
+ void movb(const Address& dst, const Immediate& imm);
+
+ void movzxw(Register dst, Register src);
+ void movzxw(Register dst, const Address& src);
+ void movsxw(Register dst, Register src);
+ void movsxw(Register dst, const Address& src);
+ void movw(Register dst, const Address& src);
+ void movw(const Address& dst, Register src);
+
+ void leal(Register dst, const Address& src);
+
+ void cmovl(Condition condition, Register dst, Register src);
+
+ void setb(Condition condition, Register dst);
+
+ void movss(XmmRegister dst, const Address& src);
+ void movss(const Address& dst, XmmRegister src);
+ void movss(XmmRegister dst, XmmRegister src);
+
+ void movd(XmmRegister dst, Register src);
+ void movd(Register dst, XmmRegister src);
+
+ void addss(XmmRegister dst, XmmRegister src);
+ void addss(XmmRegister dst, const Address& src);
+ void subss(XmmRegister dst, XmmRegister src);
+ void subss(XmmRegister dst, const Address& src);
+ void mulss(XmmRegister dst, XmmRegister src);
+ void mulss(XmmRegister dst, const Address& src);
+ void divss(XmmRegister dst, XmmRegister src);
+ void divss(XmmRegister dst, const Address& src);
+
+ void movsd(XmmRegister dst, const Address& src);
+ void movsd(const Address& dst, XmmRegister src);
+ void movsd(XmmRegister dst, XmmRegister src);
+
+ void addsd(XmmRegister dst, XmmRegister src);
+ void addsd(XmmRegister dst, const Address& src);
+ void subsd(XmmRegister dst, XmmRegister src);
+ void subsd(XmmRegister dst, const Address& src);
+ void mulsd(XmmRegister dst, XmmRegister src);
+ void mulsd(XmmRegister dst, const Address& src);
+ void divsd(XmmRegister dst, XmmRegister src);
+ void divsd(XmmRegister dst, const Address& src);
+
+ void cvtsi2ss(XmmRegister dst, Register src);
+ void cvtsi2sd(XmmRegister dst, Register src);
+
+ void cvtss2si(Register dst, XmmRegister src);
+ void cvtss2sd(XmmRegister dst, XmmRegister src);
+
+ void cvtsd2si(Register dst, XmmRegister src);
+ void cvtsd2ss(XmmRegister dst, XmmRegister src);
+
+ void cvttss2si(Register dst, XmmRegister src);
+ void cvttsd2si(Register dst, XmmRegister src);
+
+ void cvtdq2pd(XmmRegister dst, XmmRegister src);
+
+ void comiss(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, XmmRegister b);
+
+ void sqrtsd(XmmRegister dst, XmmRegister src);
+ void sqrtss(XmmRegister dst, XmmRegister src);
+
+ void xorpd(XmmRegister dst, const Address& src);
+ void xorpd(XmmRegister dst, XmmRegister src);
+ void xorps(XmmRegister dst, const Address& src);
+ void xorps(XmmRegister dst, XmmRegister src);
+
+ void andpd(XmmRegister dst, const Address& src);
+
+ void flds(const Address& src);
+ void fstps(const Address& dst);
+
+ void fldl(const Address& src);
+ void fstpl(const Address& dst);
+
+ void fnstcw(const Address& dst);
+ void fldcw(const Address& src);
+
+ void fistpl(const Address& dst);
+ void fistps(const Address& dst);
+ void fildl(const Address& src);
+
+ void fincstp();
+ void ffree(const Immediate& index);
+
+ void fsin();
+ void fcos();
+ void fptan();
+
+ void xchgl(Register dst, Register src);
+ void xchgl(Register reg, const Address& address);
+
+ void cmpl(Register reg, const Immediate& imm);
+ void cmpl(Register reg0, Register reg1);
+ void cmpl(Register reg, const Address& address);
+
+ void cmpl(const Address& address, Register reg);
+ void cmpl(const Address& address, const Immediate& imm);
+
+ void testl(Register reg1, Register reg2);
+ void testl(Register reg, const Immediate& imm);
+
+ void andl(Register dst, const Immediate& imm);
+ void andl(Register dst, Register src);
+
+ void orl(Register dst, const Immediate& imm);
+ void orl(Register dst, Register src);
+
+ void xorl(Register dst, Register src);
+
+ void addl(Register dst, Register src);
+ void addl(Register reg, const Immediate& imm);
+ void addl(Register reg, const Address& address);
+
+ void addl(const Address& address, Register reg);
+ void addl(const Address& address, const Immediate& imm);
+
+ void adcl(Register dst, Register src);
+ void adcl(Register reg, const Immediate& imm);
+ void adcl(Register dst, const Address& address);
+
+ void subl(Register dst, Register src);
+ void subl(Register reg, const Immediate& imm);
+ void subl(Register reg, const Address& address);
+
+ void cdq();
+
+ void idivl(Register reg);
+
+ void imull(Register dst, Register src);
+ void imull(Register reg, const Immediate& imm);
+ void imull(Register reg, const Address& address);
+
+ void imull(Register reg);
+ void imull(const Address& address);
+
+ void mull(Register reg);
+ void mull(const Address& address);
+
+ void sbbl(Register dst, Register src);
+ void sbbl(Register reg, const Immediate& imm);
+ void sbbl(Register reg, const Address& address);
+
+ void incl(Register reg);
+ void incl(const Address& address);
+
+ void decl(Register reg);
+ void decl(const Address& address);
+
+ void shll(Register reg, const Immediate& imm);
+ void shll(Register operand, Register shifter);
+ void shrl(Register reg, const Immediate& imm);
+ void shrl(Register operand, Register shifter);
+ void sarl(Register reg, const Immediate& imm);
+ void sarl(Register operand, Register shifter);
+ void shld(Register dst, Register src);
+
+ void negl(Register reg);
+ void notl(Register reg);
+
+ void enter(const Immediate& imm);
+ void leave();
+
+ void ret();
+ void ret(const Immediate& imm);
+
+ void nop();
+ void int3();
+ void hlt();
+
+ void j(Condition condition, Label* label);
+
+ void jmp(Register reg);
+ void jmp(const Address& address);
+ void jmp(Label* label);
+
+ X86Assembler* lock();
+ void cmpxchgl(const Address& address, Register reg);
+
+ void mfence();
+
+ X86Assembler* fs();
+
+ //
+  // Macros for high-level operations.
+ //
+
+ void AddImmediate(Register reg, const Immediate& imm);
+
+ void LoadDoubleConstant(XmmRegister dst, double value);
+
+ void DoubleNegate(XmmRegister d);
+ void FloatNegate(XmmRegister f);
+
+ void DoubleAbs(XmmRegister reg);
+
+ void LockCmpxchgl(const Address& address, Register reg) {
+ lock()->cmpxchgl(address, reg);
+ }
+
+ //
+ // Misc. functionality
+ //
+ int PreferredLoopAlignment() { return 16; }
+ void Align(int alignment, int offset);
+ void Bind(Label* label);
+
+ // Debugging and bringup support.
+ void Stop(const char* message);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const std::vector<ManagedRegister>& entry_spills);
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs);
+
+ virtual void IncreaseFrameSize(size_t adjust);
+ virtual void DecreaseFrameSize(size_t adjust);
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+ void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl);
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+ FrameOffset in_off, ManagedRegister scratch);
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs);
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+ Offset offs);
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest,
+ ThreadOffset offs);
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void MemoryBarrier(ManagedRegister);
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr);
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed is true. in_reg holds a possibly stale
+  // reference that can be used to avoid loading the SIRT entry to see if the
+  // value is NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed is true.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load the referenced Object* into dst.
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src is never null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+ virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+ inline void EmitUint8(uint8_t value);
+ inline void EmitInt32(int32_t value);
+ inline void EmitRegisterOperand(int rm, int reg);
+ inline void EmitXmmRegisterOperand(int rm, XmmRegister reg);
+ inline void EmitFixup(AssemblerFixup* fixup);
+ inline void EmitOperandSizeOverride();
+
+ void EmitOperand(int rm, const Operand& operand);
+ void EmitImmediate(const Immediate& imm);
+ void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
+ void EmitLabel(Label* label, int instruction_size);
+ void EmitLabelLink(Label* label);
+ void EmitNearLabelLink(Label* label);
+
+ void EmitGenericShift(int rm, Register reg, const Immediate& imm);
+ void EmitGenericShift(int rm, Register operand, Register shifter);
+
+ DISALLOW_COPY_AND_ASSIGN(X86Assembler);
+};
+
+inline void X86Assembler::EmitUint8(uint8_t value) {
+ buffer_.Emit<uint8_t>(value);
+}
+
+inline void X86Assembler::EmitInt32(int32_t value) {
+ buffer_.Emit<int32_t>(value);
+}
+
+inline void X86Assembler::EmitRegisterOperand(int rm, int reg) {
+ CHECK_GE(rm, 0);
+ CHECK_LT(rm, 8);
+ buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
+}
+
+inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) {
+ EmitRegisterOperand(rm, static_cast<Register>(reg));
+}
+
+inline void X86Assembler::EmitFixup(AssemblerFixup* fixup) {
+ buffer_.EmitFixup(fixup);
+}
+
+inline void X86Assembler::EmitOperandSizeOverride() {
+ EmitUint8(0x66);
+}
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86ExceptionSlowPath : public SlowPath {
+ public:
+ X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm);
+ private:
+ const size_t stack_adjust_;
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
diff --git a/runtime/oat/utils/x86/assembler_x86_test.cc b/runtime/oat/utils/x86/assembler_x86_test.cc
new file mode 100644
index 0000000..5d8a3b1
--- /dev/null
+++ b/runtime/oat/utils/x86/assembler_x86_test.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(AssemblerX86, CreateBuffer) {
+ AssemblerBuffer buffer;
+ AssemblerBuffer::EnsureCapacity ensured(&buffer);
+ buffer.Emit<uint8_t>(0x42);
+ ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
+ buffer.Emit<int32_t>(42);
+ ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
+}
+
+} // namespace art
diff --git a/runtime/oat/utils/x86/managed_register_x86.cc b/runtime/oat/utils/x86/managed_register_x86.cc
new file mode 100644
index 0000000..4697d06
--- /dev/null
+++ b/runtime/oat/utils/x86/managed_register_x86.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_x86.h"
+
+#include "globals.h"
+
+namespace art {
+namespace x86 {
+
+// These cpu registers are never available for allocation.
+static const Register kReservedCpuRegistersArray[] = { ESP };
+
+
+// In debug-code mode the number of registers available for allocation could
+// be reduced in order to increase register pressure; all registers are
+// needed for caching, so every register is kept available here.
+static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
+static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Define register pairs.
+// This list must be kept in sync with the RegisterPair enum.
+#define REGISTER_PAIR_LIST(P) \
+ P(EAX, EDX) \
+ P(EAX, ECX) \
+ P(EAX, EBX) \
+ P(EAX, EDI) \
+ P(EDX, ECX) \
+ P(EDX, EBX) \
+ P(EDX, EDI) \
+ P(ECX, EBX) \
+ P(ECX, EDI) \
+ P(EBX, EDI)
+
+
+struct RegisterPairDescriptor {
+ RegisterPair reg; // Used to verify that the enum is in sync.
+ Register low;
+ Register high;
+};
+
+
+static const RegisterPairDescriptor kRegisterPairs[] = {
+#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high },
+ REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION)
+#undef REGISTER_PAIR_ENUMERATION
+};
+
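+// For illustration: the REGISTER_PAIR_ENUMERATION entry P(EAX, EDX) pastes
+// low##_##high into "{ EAX_EDX, EAX, EDX },", so kRegisterPairs[EAX_EDX]
+// records the pair's own enum value next to its low and high cpu registers.
+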
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+ os << X86ManagedRegister::FromRegisterPair(reg);
+ return os;
+}
+
+bool X86ManagedRegister::Overlaps(const X86ManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ CHECK(IsValidManagedRegister());
+ CHECK(other.IsValidManagedRegister());
+ if (Equals(other)) return true;
+ if (IsRegisterPair()) {
+ Register low = AsRegisterPairLow();
+ Register high = AsRegisterPairHigh();
+ return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) ||
+ X86ManagedRegister::FromCpuRegister(high).Overlaps(other);
+ }
+ if (other.IsRegisterPair()) {
+ return other.Overlaps(*this);
+ }
+ return false;
+}
+
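+// For example, FromCpuRegister(EDX).Overlaps(FromRegisterPair(EAX_EDX)) is
+// true: the pair is decomposed into its low and high cpu registers and each
+// half is compared against the other register.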
+
+int X86ManagedRegister::AllocIdLow() const {
+ CHECK(IsRegisterPair());
+ const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds);
+ CHECK_EQ(r, kRegisterPairs[r].reg);
+ return kRegisterPairs[r].low;
+}
+
+
+int X86ManagedRegister::AllocIdHigh() const {
+ CHECK(IsRegisterPair());
+ const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds);
+ CHECK_EQ(r, kRegisterPairs[r].reg);
+ return kRegisterPairs[r].high;
+}
+
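+// For example, for EAX_EDX (pair index 0) AllocIdLow() returns the allocation
+// id of EAX and AllocIdHigh() that of EDX; cpu register ids coincide with
+// their allocation ids, so these are simply EAX and EDX.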
+
+void X86ManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsXmmRegister()) {
+ os << "XMM: " << static_cast<int>(AsXmmRegister());
+ } else if (IsX87Register()) {
+ os << "X87: " << static_cast<int>(AsX87Register());
+ } else if (IsCpuRegister()) {
+ os << "CPU: " << static_cast<int>(AsCpuRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+} // namespace x86
+} // namespace art
diff --git a/runtime/oat/utils/x86/managed_register_x86.h b/runtime/oat/utils/x86/managed_register_x86.h
new file mode 100644
index 0000000..4481456
--- /dev/null
+++ b/runtime/oat/utils/x86/managed_register_x86.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+#define ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+
+#include "constants_x86.h"
+#include "oat/utils/managed_register.h"
+
+namespace art {
+namespace x86 {
+
+// Values for register pairs.
+// The registers in kReservedCpuRegistersArray in managed_register_x86.cc are
+// not used in pairs.
+// The table kRegisterPairs in managed_register_x86.cc must be kept in sync
+// with this enum.
+enum RegisterPair {
+ EAX_EDX = 0,
+ EAX_ECX = 1,
+ EAX_EBX = 2,
+ EAX_EDI = 3,
+ EDX_ECX = 4,
+ EDX_EBX = 5,
+ EDX_EDI = 6,
+ ECX_EBX = 7,
+ ECX_EDI = 8,
+ EBX_EDI = 9,
+ kNumberOfRegisterPairs = 10,
+ kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCpuRegIds = kNumberOfCpuRegisters;
+const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters;
+
+const int kNumberOfXmmRegIds = kNumberOfXmmRegisters;
+const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters;
+
+const int kNumberOfX87RegIds = kNumberOfX87Registers;
+const int kNumberOfX87AllocIds = kNumberOfX87Registers;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
+ kNumberOfX87RegIds;
+
+// Register ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// [S..P[ register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
+// P = S + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
+
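+// For example, assuming 8 cpu, 8 xmm and 8 x87 registers, EAX maps to
+// register id 0, XMM0 to id 8, ST0 to id 16 and the first pair EAX_EDX to
+// id 24; pairs have no allocation id of their own and are broken into their
+// halves via AllocIdLow()/AllocIdHigh().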
+
+// An instance of class 'ManagedRegister' represents a single cpu register (enum
+// Register), an xmm register (enum XmmRegister), an x87 register (enum
+// X87Register), or a pair of cpu registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class X86ManagedRegister : public ManagedRegister {
+ public:
+ ByteRegister AsByteRegister() const {
+ CHECK(IsCpuRegister());
+ CHECK_LT(AsCpuRegister(), ESP); // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
+ return static_cast<ByteRegister>(id_);
+ }
+
+ Register AsCpuRegister() const {
+ CHECK(IsCpuRegister());
+ return static_cast<Register>(id_);
+ }
+
+ XmmRegister AsXmmRegister() const {
+ CHECK(IsXmmRegister());
+ return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
+ }
+
+ X87Register AsX87Register() const {
+ CHECK(IsX87Register());
+ return static_cast<X87Register>(id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds));
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+ // The appropriate mapping of register ids allows the use of AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCpuRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+ // The appropriate mapping of register ids allows the use of AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCpuRegister();
+ }
+
+ bool IsCpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
+ }
+
+ bool IsXmmRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCpuRegIds;
+ return (0 <= test) && (test < kNumberOfXmmRegIds);
+ }
+
+ bool IsX87Register() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ return (0 <= test) && (test < kNumberOfX87RegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ void Print(std::ostream& os) const;
+
+ // Returns true if the two managed registers ('this' and 'other') overlap.
+ // Either managed register may be NoRegister. If both are NoRegister,
+ // false is returned.
+ bool Overlaps(const X86ManagedRegister& other) const;
+
+ static X86ManagedRegister FromCpuRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static X86ManagedRegister FromXmmRegister(XmmRegister r) {
+ CHECK_NE(r, kNoXmmRegister);
+ return FromRegId(r + kNumberOfCpuRegIds);
+ }
+
+ static X86ManagedRegister FromX87Register(X87Register r) {
+ CHECK_NE(r, kNoX87Register);
+ return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ }
+
+ static X86ManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds));
+ }
+
+ private:
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() && !IsRegisterPair());
+ CHECK_LT(id_, kNumberOfAllocIds);
+ return id_;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ friend class ManagedRegister;
+
+ explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+ static X86ManagedRegister FromRegId(int reg_id) {
+ X86ManagedRegister reg(reg_id);
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+};
+
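+// Usage sketch (illustrative only, mirroring the unit tests in
+// managed_register_x86_test.cc):
+//   X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+//   CHECK(pair.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+//   Register low = pair.AsRegisterPairLow();    // EAX
+//   Register high = pair.AsRegisterPairHigh();  // EDX
+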
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg);
+
+} // namespace x86
+
+inline x86::X86ManagedRegister ManagedRegister::AsX86() const {
+ x86::X86ManagedRegister reg(id_);
+ CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+ return reg;
+}
+
+} // namespace art
+
+#endif // ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
diff --git a/runtime/oat/utils/x86/managed_register_x86_test.cc b/runtime/oat/utils/x86/managed_register_x86_test.cc
new file mode 100644
index 0000000..4fbafda
--- /dev/null
+++ b/runtime/oat/utils/x86/managed_register_x86_test.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace x86 {
+
+TEST(X86ManagedRegister, NoRegister) {
+ X86ManagedRegister reg = ManagedRegister::NoRegister().AsX86();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(X86ManagedRegister, CpuRegister) {
+ X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsCpuRegister());
+
+ reg = X86ManagedRegister::FromCpuRegister(EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EBX, reg.AsCpuRegister());
+
+ reg = X86ManagedRegister::FromCpuRegister(ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsCpuRegister());
+
+ reg = X86ManagedRegister::FromCpuRegister(EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EDI, reg.AsCpuRegister());
+}
+
+TEST(X86ManagedRegister, XmmRegister) {
+ X86ManagedRegister reg = X86ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM0, reg.AsXmmRegister());
+
+ reg = X86ManagedRegister::FromXmmRegister(XMM1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM1, reg.AsXmmRegister());
+
+ reg = X86ManagedRegister::FromXmmRegister(XMM7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM7, reg.AsXmmRegister());
+}
+
+TEST(X86ManagedRegister, X87Register) {
+ X86ManagedRegister reg = X86ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST0, reg.AsX87Register());
+
+ reg = X86ManagedRegister::FromX87Register(ST1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST1, reg.AsX87Register());
+
+ reg = X86ManagedRegister::FromX87Register(ST7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST7, reg.AsX87Register());
+}
+
+TEST(X86ManagedRegister, RegisterPair) {
+ X86ManagedRegister reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EAX_ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EAX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EAX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EDX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EDX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(ECX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(ECX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EBX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+}
+
+TEST(X86ManagedRegister, Equals) {
+ X86ManagedRegister reg_eax = X86ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ X86ManagedRegister reg_xmm0 = X86ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ X86ManagedRegister reg_st0 = X86ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg_st0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ X86ManagedRegister reg_pair = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+}
+
+TEST(X86ManagedRegister, Overlaps) {
+ X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromCpuRegister(EDX);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromCpuRegister(EDI);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromCpuRegister(EBX);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_ECX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+
+ reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+ EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+}
+
+} // namespace x86
+} // namespace art