summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--build/Android.gtest.mk2
-rw-r--r--compiler/dex/quick/arm/call_arm.cc6
-rw-r--r--compiler/dex/quick/arm/fp_arm.cc16
-rw-r--r--compiler/dex/quick/arm/int_arm.cc4
-rw-r--r--compiler/dex/quick/gen_common.cc101
-rw-r--r--compiler/dex/quick/gen_invoke.cc36
-rw-r--r--compiler/dex/quick/mips/call_mips.cc6
-rw-r--r--compiler/dex/quick/mips/fp_mips.cc26
-rw-r--r--compiler/dex/quick/mips/int_mips.cc2
-rw-r--r--compiler/dex/quick/x86/call_x86.cc6
-rw-r--r--compiler/dex/quick/x86/fp_x86.cc14
-rw-r--r--compiler/dex/quick/x86/int_x86.cc2
-rw-r--r--compiler/jni/quick/jni_compiler.cc12
-rw-r--r--compiler/stubs/portable/stubs.cc7
-rw-r--r--compiler/stubs/quick/stubs.cc18
-rw-r--r--compiler/utils/arm/assembler_arm.cc2
-rw-r--r--compiler/utils/mips/assembler_mips.cc2
-rw-r--r--compiler/utils/x86/assembler_x86.cc2
-rw-r--r--runtime/Android.mk62
-rw-r--r--runtime/arch/arm/asm_support_arm.S38
-rw-r--r--runtime/arch/arm/asm_support_arm.h31
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc (renamed from runtime/arch/arm/quick_entrypoints_init_arm.cc)160
-rw-r--r--runtime/arch/arm/jni_entrypoints_arm.S65
-rw-r--r--runtime/arch/arm/portable_entrypoints_arm.S96
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S127
-rw-r--r--runtime/arch/arm/thread_arm.cc (renamed from runtime/thread_arm.cc)4
-rw-r--r--runtime/arch/mips/asm_support_mips.S41
-rw-r--r--runtime/arch/mips/asm_support_mips.h31
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc (renamed from runtime/arch/mips/quick_entrypoints_init_mips.cc)158
-rw-r--r--runtime/arch/mips/jni_entrypoints_mips.S89
-rw-r--r--runtime/arch/mips/portable_entrypoints_mips.S73
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S115
-rw-r--r--runtime/arch/mips/thread_mips.cc (renamed from runtime/thread_mips.cc)4
-rw-r--r--runtime/arch/x86/asm_support_x86.S91
-rw-r--r--runtime/arch/x86/asm_support_x86.h27
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc (renamed from runtime/arch/x86/quick_entrypoints_init_x86.cc)145
-rw-r--r--runtime/arch/x86/jni_entrypoints_x86.S35
-rw-r--r--runtime/arch/x86/portable_entrypoints_x86.S109
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S172
-rw-r--r--runtime/arch/x86/thread_x86.cc (renamed from runtime/thread_x86.cc)2
-rw-r--r--runtime/asm_support.h25
-rw-r--r--runtime/class_linker.cc5
-rw-r--r--runtime/class_linker_test.cc2
-rw-r--r--runtime/common_test.h2
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc (renamed from runtime/runtime_support.cc)70
-rw-r--r--runtime/entrypoints/entrypoint_utils.h (renamed from runtime/runtime_support.h)13
-rw-r--r--runtime/entrypoints/jni/jni_entrypoints.cc46
-rw-r--r--runtime/entrypoints/math_entrypoints.cc89
-rw-r--r--runtime/entrypoints/math_entrypoints.h (renamed from runtime/runtime_support_llvm.h)20
-rw-r--r--runtime/entrypoints/math_entrypoints_test.cc (renamed from runtime/runtime_support_test.cc)12
-rw-r--r--runtime/entrypoints/portable/portable_alloc_entrypoints.cc69
-rw-r--r--runtime/entrypoints/portable/portable_argument_visitor.h136
-rw-r--r--runtime/entrypoints/portable/portable_cast_entrypoints.cc57
-rw-r--r--runtime/entrypoints/portable/portable_dexcache_entrypoints.cc53
-rw-r--r--runtime/entrypoints/portable/portable_entrypoints.h44
-rw-r--r--runtime/entrypoints/portable/portable_field_entrypoints.cc241
-rw-r--r--runtime/entrypoints/portable/portable_fillarray_entrypoints.cc50
-rw-r--r--runtime/entrypoints/portable/portable_invoke_entrypoints.cc104
-rw-r--r--runtime/entrypoints/portable/portable_jni_entrypoints.cc98
-rw-r--r--runtime/entrypoints/portable/portable_lock_entrypoints.cc38
-rw-r--r--runtime/entrypoints/portable/portable_proxy_entrypoints.cc109
-rw-r--r--runtime/entrypoints/portable/portable_stub_entrypoints.cc145
-rw-r--r--runtime/entrypoints/portable/portable_thread_entrypoints.cc99
-rw-r--r--runtime/entrypoints/portable/portable_throw_entrypoints.cc123
-rw-r--r--runtime/entrypoints/quick/quick_alloc_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_argument_visitor.h110
-rw-r--r--runtime/entrypoints/quick/quick_cast_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc4
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints.h15
-rw-r--r--runtime/entrypoints/quick/quick_field_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_fillarray_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_invoke_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_jni_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_proxy_entrypoints.cc86
-rw-r--r--runtime/entrypoints/quick/quick_stub_entrypoints.cc143
-rw-r--r--runtime/entrypoints/quick/quick_thread_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_throw_entrypoints.cc2
-rw-r--r--runtime/interpreter/interpreter.cc2
-rw-r--r--runtime/mirror/abstract_method-inl.h2
-rw-r--r--runtime/mirror/object_test.cc2
-rw-r--r--runtime/runtime_support_llvm.cc930
-rw-r--r--runtime/thread.cc177
-rw-r--r--runtime/thread.h8
83 files changed, 2783 insertions, 2301 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4648d44..b9ebd83 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -34,6 +34,7 @@ TEST_COMMON_SRC_FILES := \
runtime/dex_file_test.cc \
runtime/dex_instruction_visitor_test.cc \
runtime/dex_method_iterator_test.cc \
+ runtime/entrypoints/math_entrypoints_test.cc \
runtime/exception_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
@@ -50,7 +51,6 @@ TEST_COMMON_SRC_FILES := \
runtime/oat_test.cc \
runtime/output_stream_test.cc \
runtime/reference_table_test.cc \
- runtime/runtime_support_test.cc \
runtime/runtime_test.cc \
runtime/thread_pool_test.cc \
runtime/utils_test.cc \
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 7c3ec14..745e43d 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -432,7 +432,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, r0);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
rARM_LR);
// Materialize a pointer to the fill data image
NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
@@ -488,7 +488,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
OpRegImm(kOpCmp, r1, 0);
OpIT(kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
@@ -519,7 +519,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
OpIT(kCondEq, "EE");
StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 1bb08c4..08d6778 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -49,7 +49,8 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -91,7 +92,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode,
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -140,16 +142,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode,
op = kThumb2VcvtDI;
break;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -315,7 +317,7 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) {
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCalleeSave();
LockCallTemps(); // Using fixed registers
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
NewLIR1(kThumbBlxR, r_tgt);
NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 4bb507b..9db1016 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -665,7 +665,7 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
*/
RegLocation rl_result;
if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
- int func_offset = ENTRYPOINT_OFFSET(pLmul);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
FlushAllRegs();
CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
rl_result = GetReturnWide(false);
@@ -956,7 +956,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 8934340..ebe10bb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -211,9 +211,9 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode);
} else {
- func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
RegLocation rl_result = GetReturn(false);
@@ -233,9 +233,9 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
FreeTemp(TargetReg(kArg2));
@@ -375,7 +375,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
LoadConstant(TargetReg(kArg0), ssb_index);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -408,9 +408,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
FreeTemp(rBase);
} else {
FlushAllRegs(); // Everything to home locations
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
- : ENTRYPOINT_OFFSET(pSet32Static));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
}
}
@@ -455,7 +455,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
// or NULL if not initialized. Check for NULL and call helper if NULL.
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -483,9 +483,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
}
} else {
FlushAllRegs(); // Everything to home locations
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
- : ENTRYPOINT_OFFSET(pGet32Static));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
CallRuntimeHelperImm(getterOffset, field_idx, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -499,7 +499,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
void Mir2Lir::HandleSuspendLaunchPads() {
int num_elems = suspend_launchpads_.Size();
- int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+ int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode);
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
@@ -545,7 +545,7 @@ void Mir2Lir::HandleThrowLaunchPads() {
bool target_x86 = (cu_->instruction_set == kX86);
switch (lab->operands[0]) {
case kThrowNullPointer:
- func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
break;
case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
// v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads.
@@ -557,7 +557,7 @@ void Mir2Lir::HandleThrowLaunchPads() {
// Make sure the following LoadConstant doesn't mess with kArg1.
LockTemp(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), v2);
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
@@ -590,18 +590,18 @@ void Mir2Lir::HandleThrowLaunchPads() {
OpRegCopy(TargetReg(kArg0), v1);
}
}
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowDivZero:
- func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
break;
case kThrowNoSuchMethod:
OpRegCopy(TargetReg(kArg0), v1);
func_offset =
- ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+ QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
break;
case kThrowStackOverflow:
- func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
// Restore stack alignment
if (target_x86) {
OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
@@ -664,9 +664,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
StoreValue(rl_dest, rl_result);
}
} else {
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
- : ENTRYPOINT_OFFSET(pGet32Instance));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -719,9 +719,9 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
}
}
} else {
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
- : ENTRYPOINT_OFFSET(pSet32Instance));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
}
}
@@ -735,7 +735,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -764,7 +764,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
// TUNING: move slow path to end & remove unconditional branch
LIR* target1 = NewLIR0(kPseudoTargetLabel);
// Call out to helper, which will return resolved type in kArg0
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -797,7 +797,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
LoadWordDisp(TargetReg(kArg2),
mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode));
LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
LoadConstant(TargetReg(kArg1), string_idx);
if (cu_->instruction_set == kThumb2) {
@@ -821,7 +821,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
branch->target = target;
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+ TargetReg(kArg1), true);
}
GenBarrier();
StoreValue(rl_dest, GetReturn(false));
@@ -847,9 +848,9 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
int func_offset;
if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
cu_->method_idx, *cu_->dex_file, type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethod(func_offset, type_idx, true);
RegLocation rl_result = GetReturn(false);
@@ -858,7 +859,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
void Mir2Lir::GenThrow(RegLocation rl_src) {
FlushAllRegs();
- CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+ CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
// For final classes there are no sub-classes to check and so we can answer the instance-of
@@ -928,7 +929,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
@@ -950,7 +951,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kRet0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */
// Rejoin code paths
@@ -985,7 +986,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
}
} else {
if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
if (!type_known_abstract) {
/* Uses conditional nullification */
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
@@ -1002,13 +1003,13 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
if (cu_->instruction_set != kX86) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
} else {
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
}
}
}
@@ -1068,7 +1069,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
@@ -1088,8 +1089,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
// Not resolved
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
- true);
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
// Rejoin code paths
LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -1108,8 +1109,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
if (!type_known_abstract) {
branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
}
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2),
- true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+ TargetReg(kArg2), true);
/* branch target here */
LIR* target = NewLIR0(kPseudoTargetLabel);
branch1->target = target;
@@ -1172,15 +1173,15 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShlLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pUshrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
break;
default:
LOG(FATAL) << "Unexpected case";
@@ -1302,7 +1303,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
}
rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
} else {
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
int r_tgt = CallHelperSetup(func_offset);
@@ -1557,7 +1558,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
Clobber(TargetReg(kArg0));
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
if (is_div)
rl_result = GetReturn(false);
@@ -1634,7 +1635,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
} else {
call_out = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLmul);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
}
break;
case Instruction::DIV_LONG:
@@ -1642,13 +1643,13 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
call_out = true;
check_zero = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLdiv);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
call_out = true;
check_zero = true;
- func_offset = ENTRYPOINT_OFFSET(pLdivmod);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
break;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 91f2500..1b34e99 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -471,7 +471,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
direct_method = 0;
}
int trampoline = (cu->instruction_set == kX86) ? 0
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
if (direct_method != 0) {
switch (state) {
@@ -555,7 +555,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
uint32_t method_idx,
uintptr_t unused, uintptr_t unused2,
InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -563,7 +563,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -571,7 +571,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -579,7 +579,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -589,7 +589,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
uint32_t unused,
uintptr_t unused2, uintptr_t unused3,
InvokeType unused4) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -773,14 +773,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
if (info->num_arg_words >= 20) {
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
// Use vldm/vstm pair using kArg3 as a temp
@@ -1047,7 +1047,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
} else {
LoadValueDirectFixed(rl_start, reg_start);
}
- int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+ int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
intrinsic_launchpads_.Insert(launch_pad);
@@ -1056,7 +1056,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
}
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
@@ -1084,7 +1084,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
LoadValueDirectFixed(rl_this, reg_this);
LoadValueDirectFixed(rl_cmp, reg_cmp);
int r_tgt = (cu_->instruction_set != kX86) ?
- LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
// TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
@@ -1094,7 +1094,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
}
launch_pad->operands[2] = 0; // No return possible
// Record that we've already inlined & null checked
@@ -1409,20 +1409,20 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
int trampoline = 0;
switch (info->type) {
case kInterface:
- trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
break;
case kDirect:
- trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
break;
case kStatic:
- trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
break;
case kSuper:
- trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
break;
case kVirtual:
- trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
break;
default:
LOG(FATAL) << "Unexpected invoke type";
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index b6c200c..846c055 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -247,7 +247,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
GenBarrier();
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
GenBarrier(); // Scheduling barrier
// Construct BaseLabel and set up table base register
@@ -272,7 +272,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - artLockObjectFromCode(self, obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
@@ -287,7 +287,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - UnlockObjectFromCode(obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 620527e..3203017 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -50,7 +50,8 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -92,7 +93,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -133,22 +135,22 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -178,18 +180,18 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
switch (opcode) {
case Instruction::CMPL_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmplFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat);
wide = false;
break;
case Instruction::CMPG_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat);
wide = false;
break;
case Instruction::CMPL_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmplDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble);
break;
case Instruction::CMPG_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble);
break;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 7c8214b..bd044c6 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -579,7 +579,7 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d530a1c..1c395de 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
NewLIR1(kX86StartOfMethod, rX86_ARG2);
NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
rX86_ARG1, true);
}
@@ -165,7 +165,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
// If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
branch->target = NewLIR0(kPseudoTargetLabel);
}
@@ -185,7 +185,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
LIR* branch2 = NewLIR1(kX86Jmp8, 0);
branch->target = NewLIR0(kPseudoTargetLabel);
// Otherwise, go the expensive route - UnlockObjectFromCode(obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
branch2->target = NewLIR0(kPseudoTargetLabel);
}
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cc6f374..f736b5e 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -49,7 +49,8 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -99,7 +100,8 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -196,17 +198,17 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
return;
}
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
// TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(INFO) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 3be24df..0b4b4be 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -532,7 +532,7 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index aeadb54..b069fbd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -172,8 +172,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodStart);
+ uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_sirt_offset(0);
if (is_synchronized) {
@@ -304,13 +304,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
uintptr_t jni_end;
if (reference_return) {
// Pass result.
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEnd);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
index cee6847..def43e2 100644
--- a/compiler/stubs/portable/stubs.cc
+++ b/compiler/stubs/portable/stubs.cc
@@ -34,7 +34,8 @@ const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR,
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr
__ IncreaseFrameSize(12); // 3 words of space for alignment
@@ -69,7 +70,7 @@ const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
__ StoreToOffset(kStoreWord, A0, SP, 0);
__ LoadFromOffset(kLoadWord, T9, S1,
- ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
@@ -112,7 +113,7 @@ const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
__ pushl(ECX); // pass receiver
__ pushl(EAX); // pass called
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ leave();
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
index 598481f..912f1c0 100644
--- a/compiler/stubs/quick/stubs.cc
+++ b/compiler/stubs/quick/stubs.cc
@@ -46,7 +46,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
// TODO: enable when GetCalleeSaveMethod is available at stub generation time
// DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ IncreaseFrameSize(8); // 2 words of space for alignment
__ mov(R2, ShifterOperand(SP)); // Pass SP
@@ -71,7 +71,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -85,7 +85,7 @@ const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -123,7 +123,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
__ StoreToOffset(kStoreWord, A2, SP, 8);
__ StoreToOffset(kStoreWord, A1, SP, 4);
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
@@ -161,7 +161,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ Jr(T9);
__ Break();
@@ -176,7 +176,7 @@ const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
__ Jr(T9);
__ Break();
@@ -208,7 +208,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
__ pushl(EAX); // pass Method*
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ movl(EDI, EAX); // save code pointer in EDI
@@ -236,7 +236,7 @@ const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
@@ -249,7 +249,7 @@ const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0778cd3..fa202c3 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1884,7 +1884,7 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) {
// Don't care about preserving R0 as this call won't return
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ blx(R12);
// Call never returns
__ bkpt(0);
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 58815da..931d7ab 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -988,7 +988,7 @@ void MipsExceptionSlowPath::Emit(Assembler* sasm) {
// Don't care about preserving A0 as this call won't return
__ Move(A0, scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ Jr(T9);
// Call never returns
__ Break();
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 89bfeb5..9095180 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1837,7 +1837,7 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) {
}
// Pass exception as argument in EAX
__ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
- __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException)));
// this call should never return
__ int3();
#undef __
diff --git a/runtime/Android.mk b/runtime/Android.mk
index bc6a2ed..c686128 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -118,8 +118,6 @@ LIBART_COMMON_SRC_FILES := \
reference_table.cc \
reflection.cc \
runtime.cc \
- runtime_support.cc \
- runtime_support_llvm.cc \
signal_catcher.cc \
stack.cc \
thread.cc \
@@ -143,6 +141,21 @@ LIBART_COMMON_SRC_FILES += \
arch/arm/registers_arm.cc \
arch/x86/registers_x86.cc \
arch/mips/registers_mips.cc \
+ entrypoints/entrypoint_utils.cc \
+ entrypoints/jni/jni_entrypoints.cc \
+ entrypoints/math_entrypoints.cc \
+ entrypoints/portable/portable_alloc_entrypoints.cc \
+ entrypoints/portable/portable_cast_entrypoints.cc \
+ entrypoints/portable/portable_dexcache_entrypoints.cc \
+ entrypoints/portable/portable_field_entrypoints.cc \
+ entrypoints/portable/portable_fillarray_entrypoints.cc \
+ entrypoints/portable/portable_invoke_entrypoints.cc \
+ entrypoints/portable/portable_jni_entrypoints.cc \
+ entrypoints/portable/portable_lock_entrypoints.cc \
+ entrypoints/portable/portable_proxy_entrypoints.cc \
+ entrypoints/portable/portable_stub_entrypoints.cc \
+ entrypoints/portable/portable_thread_entrypoints.cc \
+ entrypoints/portable/portable_throw_entrypoints.cc \
entrypoints/quick/quick_alloc_entrypoints.cc \
entrypoints/quick/quick_cast_entrypoints.cc \
entrypoints/quick/quick_deoptimization_entrypoints.cc \
@@ -171,39 +184,35 @@ LIBART_TARGET_SRC_FILES := \
ifeq ($(TARGET_ARCH),arm)
LIBART_TARGET_SRC_FILES += \
arch/arm/context_arm.cc.arm \
+ arch/arm/entrypoints_init_arm.cc \
+ arch/arm/jni_entrypoints_arm.S \
+ arch/arm/portable_entrypoints_arm.S \
arch/arm/quick_entrypoints_arm.S \
- arch/arm/quick_entrypoints_init_arm.cc
+ arch/arm/thread_arm.cc
else # TARGET_ARCH != arm
ifeq ($(TARGET_ARCH),x86)
LIBART_TARGET_SRC_FILES += \
arch/x86/context_x86.cc \
- arch/x86/quick_entrypoints_init_x86.cc \
- arch/x86/quick_entrypoints_x86.S
+ arch/x86/entrypoints_init_x86.cc \
+ arch/x86/jni_entrypoints_x86.S \
+ arch/x86/portable_entrypoints_x86.S \
+ arch/x86/quick_entrypoints_x86.S \
+ arch/x86/thread_x86.cc
else # TARGET_ARCH != x86
ifeq ($(TARGET_ARCH),mips)
LIBART_TARGET_SRC_FILES += \
arch/mips/context_mips.cc \
- arch/mips/quick_entrypoints_init_mips.cc \
- arch/mips/quick_entrypoints_mips.S
+ arch/mips/entrypoints_init_mips.cc \
+ arch/mips/jni_entrypoints_mips.S \
+ arch/mips/portable_entrypoints_mips.S \
+ arch/mips/quick_entrypoints_mips.S \
+ arch/mips/thread_mips.cc
else # TARGET_ARCH != mips
$(error unsupported TARGET_ARCH=$(TARGET_ARCH))
endif # TARGET_ARCH != mips
endif # TARGET_ARCH != x86
endif # TARGET_ARCH != arm
-ifeq ($(TARGET_ARCH),arm)
-LIBART_TARGET_SRC_FILES += thread_arm.cc
-else # TARGET_ARCH != arm
-ifeq ($(TARGET_ARCH),x86)
-LIBART_TARGET_SRC_FILES += thread_x86.cc
-else # TARGET_ARCH != x86
-ifeq ($(TARGET_ARCH),mips)
-LIBART_TARGET_SRC_FILES += thread_mips.cc
-else # TARGET_ARCH != mips
-$(error unsupported TARGET_ARCH=$(TARGET_ARCH))
-endif # TARGET_ARCH != mips
-endif # TARGET_ARCH != x86
-endif # TARGET_ARCH != arm
LIBART_HOST_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
@@ -215,14 +224,11 @@ LIBART_HOST_SRC_FILES := \
ifeq ($(HOST_ARCH),x86)
LIBART_HOST_SRC_FILES += \
arch/x86/context_x86.cc \
- arch/x86/quick_entrypoints_init_x86.cc \
- arch/x86/quick_entrypoints_x86.S
-else # HOST_ARCH != x86
-$(error unsupported HOST_ARCH=$(HOST_ARCH))
-endif # HOST_ARCH != x86
-
-ifeq ($(HOST_ARCH),x86)
-LIBART_HOST_SRC_FILES += thread_x86.cc
+ arch/x86/entrypoints_init_x86.cc \
+ arch/x86/jni_entrypoints_x86.S \
+ arch/x86/portable_entrypoints_x86.S \
+ arch/x86/quick_entrypoints_x86.S \
+ arch/x86/thread_x86.cc
else # HOST_ARCH != x86
$(error unsupported HOST_ARCH=$(HOST_ARCH))
endif # HOST_ARCH != x86
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
new file mode 100644
index 0000000..ed655e9
--- /dev/null
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
+#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
+
+#include "asm_support_arm.h"
+
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+.endm
+
+.macro END name
+ .fnend
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+#endif  // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
new file mode 100644
index 0000000..ed3d476
--- /dev/null
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+
+#include "asm_support.h"
+
+// Register holding suspend check count down.
+#define rSUSPEND r4
+// Register holding Thread::Current().
+#define rSELF r9
+// Offset of field Thread::suspend_count_ verified in InitCpu
+#define THREAD_FLAGS_OFFSET 0
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
+
+#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/arch/arm/quick_entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2f66b36..b71a158 100644
--- a/runtime/arch/arm/quick_entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
+#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/math_entrypoints.h"
namespace art {
@@ -130,108 +132,110 @@ extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception_from_code();
extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-void InitEntryPoints(QuickEntryPoints* points) {
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
// Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
// Cast
- points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
+ qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
// DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
// Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
// FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
// JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
// Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
// Math
- points->pCmpgDouble = CmpgDouble;
- points->pCmpgFloat = CmpgFloat;
- points->pCmplDouble = CmplDouble;
- points->pCmplFloat = CmplFloat;
- points->pFmod = fmod;
- points->pSqrt = sqrt;
- points->pL2d = __aeabi_l2d;
- points->pFmodf = fmodf;
- points->pL2f = __aeabi_l2f;
- points->pD2iz = __aeabi_d2iz;
- points->pF2iz = __aeabi_f2iz;
- points->pIdivmod = __aeabi_idivmod;
- points->pD2l = art_d2l;
- points->pF2l = art_f2l;
- points->pLdiv = __aeabi_ldivmod;
- points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3
- points->pLmul = art_quick_mul_long;
- points->pShlLong = art_quick_shl_long;
- points->pShrLong = art_quick_shr_long;
- points->pUshrLong = art_quick_ushr_long;
+ qpoints->pCmpgDouble = CmpgDouble;
+ qpoints->pCmpgFloat = CmpgFloat;
+ qpoints->pCmplDouble = CmplDouble;
+ qpoints->pCmplFloat = CmplFloat;
+ qpoints->pFmod = fmod;
+ qpoints->pSqrt = sqrt;
+ qpoints->pL2d = __aeabi_l2d;
+ qpoints->pFmodf = fmodf;
+ qpoints->pL2f = __aeabi_l2f;
+ qpoints->pD2iz = __aeabi_d2iz;
+ qpoints->pF2iz = __aeabi_f2iz;
+ qpoints->pIdivmod = __aeabi_idivmod;
+ qpoints->pD2l = art_d2l;
+ qpoints->pF2l = art_f2l;
+ qpoints->pLdiv = __aeabi_ldivmod;
+ qpoints->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3
+ qpoints->pLmul = art_quick_mul_long;
+ qpoints->pShlLong = art_quick_shl_long;
+ qpoints->pShrLong = art_quick_shr_long;
+ qpoints->pUshrLong = art_quick_ushr_long;
// Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
// Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = __memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = memcpy;
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = __memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = memcpy;
// Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
// Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
};
} // namespace art
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
new file mode 100644
index 0000000..0a0d06a
--- /dev/null
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_arm.S"
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ push {r0, r1, r2, r3, lr} @ spill regs
+ .save {r0, r1, r2, r3, lr}
+ .pad #20
+ .cfi_adjust_cfa_offset 20
+ sub sp, #12 @ pad stack pointer to align frame
+ .pad #12
+ .cfi_adjust_cfa_offset 12
+ mov r0, r9 @ pass Thread::Current
+ blx artFindNativeMethod @ (Thread*)
+ mov r12, r0 @ save result in r12
+ add sp, #12 @ restore stack pointer
+ .cfi_adjust_cfa_offset -12
+ pop {r0, r1, r2, r3, lr} @ restore regs
+ .cfi_adjust_cfa_offset -20
+ cmp r12, #0 @ is method code null?
+ bxne r12 @ if non-null, tail call to method's code
+ bx lr @ otherwise, return to caller to handle exception
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Entry point of native methods when JNI bug compatibility is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ @ save registers that may contain arguments and LR that will be crushed by a call
+ push {r0-r3, lr}
+ .save {r0-r3, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ sub sp, #12 @ 3 words of space for alignment
+ mov r0, r9 @ pass Thread::Current
+ mov r1, sp @ pass SP
+ bl artWorkAroundAppJniBugs @ (Thread*, SP)
+ add sp, #12 @ rewind stack
+ mov r12, r0 @ save target address
+ pop {r0-r3, lr} @ restore possibly modified argument registers
+ .cfi_adjust_cfa_offset -16
+ bx r12 @ tail call into JNI routine
+END art_quick_work_around_app_jni_bugs
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
new file mode 100644
index 0000000..4cc6654
--- /dev/null
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_arm.S"
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * r0 = method pointer
+ * r1 = argument array or NULL for no argument methods
+ * r2 = size of argument array in bytes
+ * r3 = (managed) thread pointer
+ * [sp] = JValue* result
+ * [sp + 4] = result type char
+ */
+ENTRY art_portable_invoke_stub
+ push {r0, r4, r5, r9, r11, lr} @ spill regs
+ .save {r0, r4, r5, r9, r11, lr}
+ .pad #24
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset r11, 16
+ .cfi_rel_offset lr, 20
+ mov r11, sp @ save the stack pointer
+ .cfi_def_cfa_register r11
+ mov r9, r3 @ move managed thread pointer into r9
+ mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
+ add r5, r2, #16 @ create space for method pointer in frame
+ and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
+ sub sp, r5 @ reserve stack space for argument array
+ add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
+ bl memcpy @ memcpy (dest, src, bytes)
+ ldr r0, [r11] @ restore method*
+ ldr r1, [sp, #4] @ copy arg value for r1
+ ldr r2, [sp, #8] @ copy arg value for r2
+ ldr r3, [sp, #12] @ copy arg value for r3
+ mov ip, #0 @ set ip to 0
+ str ip, [sp] @ store NULL for method* at bottom of frame
+ add sp, #16 @ first 4 args are not passed on stack for portable
+ ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ blx ip @ call the method
+ mov sp, r11 @ restore the stack pointer
+ ldr ip, [sp, #24] @ load the result pointer
+ strd r0, [ip] @ store r0/r1 into result pointer
+ pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
+ .cfi_adjust_cfa_offset -24
+ bx lr
+END art_portable_invoke_stub
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
+ .save {r1-r3, r5-r8, r10-r11, lr}
+ .cfi_adjust_cfa_offset 40
+ .cfi_rel_offset r1, 0
+ .cfi_rel_offset r2, 4
+ .cfi_rel_offset r3, 8
+ .cfi_rel_offset r5, 12
+ .cfi_rel_offset r6, 16
+ .cfi_rel_offset r7, 20
+ .cfi_rel_offset r8, 24
+ .cfi_rel_offset r10, 28
+ .cfi_rel_offset r11, 32
+ .cfi_rel_offset lr, 36
+ sub sp, #8 @ 2 words of space, bottom word will hold Method*
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ @ Begin argument set up.
+ str r0, [sp, #0] @ place proxy method at bottom of frame
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ pop frame
+ .cfi_adjust_cfa_offset -48
+ bx lr @ return
+END art_portable_proxy_invoke_handler
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index f19e8ba..9b8d238 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -14,29 +14,13 @@
* limitations under the License.
*/
-#include "asm_support.h"
+#include "asm_support_arm.S"
/* Deliver the given exception */
.extern artDeliverExceptionFromCode
/* Deliver an exception pending on a thread */
.extern artDeliverPendingException
-.macro ENTRY name
- .type \name, #function
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
-.endm
-
-.macro END name
- .fnend
- .cfi_endproc
- .size \name, .-\name
-.endm
-
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
@@ -247,53 +231,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvoke
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Portable invocation stub.
- * On entry:
- * r0 = method pointer
- * r1 = argument array or NULL for no argument methods
- * r2 = size of argument array in bytes
- * r3 = (managed) thread pointer
- * [sp] = JValue* result
- * [sp + 4] = result type char
- */
-ENTRY art_portable_invoke_stub
- push {r0, r4, r5, r9, r11, lr} @ spill regs
- .save {r0, r4, r5, r9, r11, lr}
- .pad #24
- .cfi_adjust_cfa_offset 24
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r9, 12
- .cfi_rel_offset r11, 16
- .cfi_rel_offset lr, 20
- mov r11, sp @ save the stack pointer
- .cfi_def_cfa_register r11
- mov r9, r3 @ move managed thread pointer into r9
- mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
- add r5, r2, #16 @ create space for method pointer in frame
- and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
- sub sp, r5 @ reserve stack space for argument array
- add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
- bl memcpy @ memcpy (dest, src, bytes)
- ldr r0, [r11] @ restore method*
- ldr r1, [sp, #4] @ copy arg value for r1
- ldr r2, [sp, #8] @ copy arg value for r2
- ldr r3, [sp, #12] @ copy arg value for r3
- mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
- add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
- blx ip @ call the method
- mov sp, r11 @ restore the stack pointer
- ldr ip, [sp, #24] @ load the result pointer
- strd r0, [ip] @ store r0/r1 into result pointer
- pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
- .cfi_adjust_cfa_offset -24
- bx lr
-END art_portable_invoke_stub
-
- /*
* Quick invocation stub.
* On entry:
* r0 = method pointer
@@ -353,30 +290,6 @@ ENTRY art_quick_do_long_jump
END art_quick_do_long_jump
/*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
- @ save registers that may contain arguments and LR that will be crushed by a call
- push {r0-r3, lr}
- .save {r0-r3, lr}
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
- .cfi_rel_offset r3, 12
- sub sp, #12 @ 3 words of space for alignment
- mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- bl artWorkAroundAppJniBugs @ (Thread*, SP)
- add sp, #12 @ rewind stack
- mov r12, r0 @ save target address
- pop {r0-r3, lr} @ restore possibly modified argument registers
- .cfi_adjust_cfa_offset -16
- bx r12 @ tail call into JNI routine
-END art_quick_work_around_app_jni_bugs
-
- /*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
*/
@@ -906,20 +819,6 @@ ENTRY art_quick_test_suspend
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str r0, [sp, #0] @ place proxy method at bottom of frame
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
- ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
- bx lr @ return
-END art_portable_proxy_invoke_handler
-
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The
@@ -1045,30 +944,6 @@ ENTRY art_quick_abstract_method_error_stub
END art_quick_abstract_method_error_stub
/*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- push {r0, r1, r2, r3, lr} @ spill regs
- .save {r0, r1, r2, r3, lr}
- .pad #20
- .cfi_adjust_cfa_offset 20
- sub sp, #12 @ pad stack pointer to align frame
- .pad #12
- .cfi_adjust_cfa_offset 12
- mov r0, r9 @ pass Thread::Current
- blx artFindNativeMethod @ (Thread*)
- mov r12, r0 @ save result in r12
- add sp, #12 @ restore stack pointer
- .cfi_adjust_cfa_offset -12
- pop {r0, r1, r2, r3, lr} @ restore regs
- .cfi_adjust_cfa_offset -20
- cmp r12, #0 @ is method code null?
- bxne r12 @ if non-null, tail call to method's code
- bx lr @ otherwise, return to caller to handle exception
-END art_jni_dlsym_lookup_stub
-
- /*
* Signed 64-bit integer multiply.
*
* Consider WXxYZ (r1r0 x r3r2) with a long multiply:
diff --git a/runtime/thread_arm.cc b/runtime/arch/arm/thread_arm.cc
index 0ef26bf..ea908be 100644
--- a/runtime/thread_arm.cc
+++ b/runtime/arch/arm/thread_arm.cc
@@ -16,8 +16,8 @@
#include "thread.h"
-#include "asm_support.h"
-#include "base/macros.h"
+#include "asm_support_arm.h"
+#include "base/logging.h"
namespace art {
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
new file mode 100644
index 0000000..8a34b9d
--- /dev/null
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
+#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
+
+#include "asm_support_mips.h"
+
+ /* Cache alignment for function entry */
+.macro ENTRY name
+ .type \name, %function
+ .global \name
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+ /* Generates $gp for function calls */
+.macro GENERATE_GLOBAL_POINTER
+ .cpload $t9
+.endm
+
+#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
new file mode 100644
index 0000000..9a66352
--- /dev/null
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+
+#include "asm_support.h"
+
+// Register holding suspend check count down.
+#define rSUSPEND $s0
+// Register holding Thread::Current().
+#define rSELF $s1
+// Offset of field Thread::suspend_count_ verified in InitCpu
+#define THREAD_FLAGS_OFFSET 0
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
+
+#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/quick_entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index d494c65..0a62a40 100644
--- a/runtime/arch/mips/quick_entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
+#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/math_entrypoints.h"
namespace art {
@@ -132,107 +134,109 @@ extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception_from_code();
extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-void InitEntryPoints(QuickEntryPoints* points) {
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
// Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
// Cast
- points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
+ qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
// DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
// Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
// FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
// JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
// Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
// Math
- points->pCmpgDouble = CmpgDouble;
- points->pCmpgFloat = CmpgFloat;
- points->pCmplDouble = CmplDouble;
- points->pCmplFloat = CmplFloat;
- points->pFmod = fmod;
- points->pL2d = __floatdidf;
- points->pFmodf = fmodf;
- points->pL2f = __floatdisf;
- points->pD2iz = __fixdfsi;
- points->pF2iz = __fixsfsi;
- points->pIdivmod = NULL;
- points->pD2l = art_d2l;
- points->pF2l = art_f2l;
- points->pLdiv = artLdivFromCode;
- points->pLdivmod = artLdivmodFromCode;
- points->pLmul = artLmulFromCode;
- points->pShlLong = art_quick_shl_long;
- points->pShrLong = art_quick_shr_long;
- points->pUshrLong = art_quick_ushr_long;
+ qpoints->pCmpgDouble = CmpgDouble;
+ qpoints->pCmpgFloat = CmpgFloat;
+ qpoints->pCmplDouble = CmplDouble;
+ qpoints->pCmplFloat = CmplFloat;
+ qpoints->pFmod = fmod;
+ qpoints->pL2d = __floatdidf;
+ qpoints->pFmodf = fmodf;
+ qpoints->pL2f = __floatdisf;
+ qpoints->pD2iz = __fixdfsi;
+ qpoints->pF2iz = __fixsfsi;
+ qpoints->pIdivmod = NULL;
+ qpoints->pD2l = art_d2l;
+ qpoints->pF2l = art_f2l;
+ qpoints->pLdiv = artLdivFromCode;
+ qpoints->pLdivmod = artLdivmodFromCode;
+ qpoints->pLmul = artLmulFromCode;
+ qpoints->pShlLong = art_quick_shl_long;
+ qpoints->pShrLong = art_quick_shr_long;
+ qpoints->pUshrLong = art_quick_ushr_long;
// Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
// Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = __memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = memcpy;
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = __memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = memcpy;
// Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
// Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
};
} // namespace art
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
new file mode 100644
index 0000000..fca6d77
--- /dev/null
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_mips.S"
+
+ .set noreorder
+ .balign 4
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ GENERATE_GLOBAL_POINTER
+ addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 16($sp)
+ .cfi_rel_offset 31, 16
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ sw $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+ jal artFindNativeMethod # (Thread*)
+ move $a0, $s1 # pass Thread::Current()
+ lw $a0, 0($sp) # restore registers from stack
+ lw $a1, 4($sp)
+ lw $a2, 8($sp)
+ lw $a3, 12($sp)
+ lw $ra, 16($sp)
+ beq $v0, $zero, no_native_code_found
+ addiu $sp, $sp, 32 # restore the stack
+ .cfi_adjust_cfa_offset -32
+ move $t9, $v0 # put method code result in $t9
+ jr $t9 # leaf call to method's code
+ nop
+no_native_code_found:
+ jr $ra
+ nop
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Entry point of native methods when JNI bug compatibility is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ GENERATE_GLOBAL_POINTER
+ # save registers that may contain arguments and LR that will be crushed by a call
+ addiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 28($sp)
+ .cfi_rel_offset 31, 28
+ sw $a3, 24($sp)
+    .cfi_rel_offset 7, 24
+    sw     $a2, 20($sp)
+    .cfi_rel_offset 6, 20
+    sw     $a1, 16($sp)
+    .cfi_rel_offset 5, 16
+    sw     $a0, 12($sp)
+    .cfi_rel_offset 4, 12
+ move $a0, rSELF # pass Thread::Current
+ jal artWorkAroundAppJniBugs # (Thread*, $sp)
+ move $a1, $sp # pass $sp
+ move $t9, $v0 # save target address
+ lw $a0, 12($sp)
+ lw $a1, 16($sp)
+ lw $a2, 20($sp)
+ lw $a3, 24($sp)
+ lw $ra, 28($sp)
+ jr $t9 # tail call into JNI routine
+ addiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+END art_quick_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
new file mode 100644
index 0000000..e7a9b0f
--- /dev/null
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_mips.S"
+
+ .set noreorder
+ .balign 4
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ GENERATE_GLOBAL_POINTER
+ # Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ # TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ addiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sw $ra, 60($sp)
+ .cfi_rel_offset 31, 60
+ sw $s8, 56($sp)
+ .cfi_rel_offset 30, 56
+ sw $gp, 52($sp)
+ .cfi_rel_offset 28, 52
+ sw $s7, 48($sp)
+ .cfi_rel_offset 23, 48
+ sw $s6, 44($sp)
+ .cfi_rel_offset 22, 44
+ sw $s5, 40($sp)
+ .cfi_rel_offset 21, 40
+ sw $s4, 36($sp)
+ .cfi_rel_offset 20, 36
+ sw $s3, 32($sp)
+ .cfi_rel_offset 19, 32
+ sw $s2, 28($sp)
+ .cfi_rel_offset 18, 28
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ # Begin argument set up.
+ sw $a0, 0($sp) # place proxy method at bottom of frame
+ move $a2, rSELF # pass Thread::Current
+ jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
+ move $a3, $sp # pass $sp
+ lw $ra, 60($sp) # restore $ra
+ jr $ra
+ addiu $sp, $sp, 64 # pop frame
+ .cfi_adjust_cfa_offset -64
+END art_portable_proxy_invoke_handler
+
+ /*
+ * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
+ */
+ .extern artThrowAbstractMethodErrorFromCode
+ENTRY art_portable_abstract_method_error_stub
+ GENERATE_GLOBAL_POINTER
+ la $t9, artThrowAbstractMethodErrorFromCode
+ jr $t9 # (Method*, Thread*, SP)
+ move $a1, $s1 # pass Thread::Current
+END art_portable_abstract_method_error_stub
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 45d583e..d32a2b4 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "asm_support.h"
+#include "asm_support_mips.S"
.set noreorder
.balign 4
@@ -24,25 +24,6 @@
/* Deliver an exception pending on a thread */
.extern artDeliverPendingExceptionFromCode
- /* Cache alignment for function entry */
-.macro ENTRY name
- .type \name, %function
- .global \name
- .balign 16
-\name:
- .cfi_startproc
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
- /* Generates $gp for function calls */
-.macro GENERATE_GLOBAL_POINTER
- .cpload $t9
-.endm
-
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
@@ -481,39 +462,6 @@ END art_quick_invoke_stub
.size art_portable_invoke_stub, .-art_portable_invoke_stub
/*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
- GENERATE_GLOBAL_POINTER
- # save registers that may contain arguments and LR that will be crushed by a call
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $a3, 24($sp)
- .cfi_rel_offset 7, 28
- sw $a2, 20($sp)
- .cfi_rel_offset 6, 28
- sw $a1, 16($sp)
- .cfi_rel_offset 5, 28
- sw $a0, 12($sp)
- .cfi_rel_offset 4, 28
- move $a0, rSELF # pass Thread::Current
- jal artWorkAroundAppJniBugs # (Thread*, $sp)
- move $a1, $sp # pass $sp
- move $t9, $v0 # save target address
- lw $a0, 12($sp)
- lw $a1, 16($sp)
- lw $a2, 20($sp)
- lw $a3, 24($sp)
- lw $ra, 28($sp)
- jr $t9 # tail call into JNI routine
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-END art_quick_work_around_app_jni_bugs
-
- /*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
*/
@@ -912,20 +860,6 @@ ENTRY art_quick_test_suspend
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- sw $a0, 0($sp) # place proxy method at bottom of frame
- move $a2, rSELF # pass Thread::Current
- jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
- lw $ra, 60($sp) # restore $ra
- jr $ra
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
-END art_portable_proxy_invoke_handler
-
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* r0 holds the proxy method; r1, r2 and r3 may contain arguments.
@@ -1044,17 +978,6 @@ ENTRY art_quick_deoptimize
END art_quick_deoptimize
/*
- * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
- */
- .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
- GENERATE_GLOBAL_POINTER
- la $t9, artThrowAbstractMethodErrorFromCode
- jr $t9 # (Method*, Thread*, SP)
- move $a1, $s1 # pass Thread::Current
-END art_portable_abstract_method_error_stub
-
- /*
* Quick abstract method error stub. $a0 contains method* on entry.
*/
ENTRY art_quick_abstract_method_error_stub
@@ -1067,42 +990,6 @@ ENTRY art_quick_abstract_method_error_stub
END art_quick_abstract_method_error_stub
/*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- GENERATE_GLOBAL_POINTER
- addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
- .cfi_adjust_cfa_offset 32
- sw $ra, 16($sp)
- .cfi_rel_offset 31, 16
- sw $a3, 12($sp)
- .cfi_rel_offset 7, 12
- sw $a2, 8($sp)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- sw $a0, 0($sp)
- .cfi_rel_offset 4, 0
- jal artFindNativeMethod # (Thread*)
- move $a0, $s1 # pass Thread::Current()
- lw $a0, 0($sp) # restore registers from stack
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
- lw $ra, 16($sp)
- beq $v0, $zero, no_native_code_found
- addiu $sp, $sp, 32 # restore the stack
- .cfi_adjust_cfa_offset -32
- move $t9, $v0 # put method code result in $t9
- jr $t9 # leaf call to method's code
- nop
-no_native_code_found:
- jr $ra
- nop
-END art_jni_dlsym_lookup_stub
-
- /*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
diff --git a/runtime/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
index 0ef26bf..7364de0 100644
--- a/runtime/thread_mips.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -16,8 +16,8 @@
#include "thread.h"
-#include "asm_support.h"
-#include "base/macros.h"
+#include "asm_support_mips.h"
+#include "base/logging.h"
namespace art {
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
new file mode 100644
index 0000000..7e6dce9
--- /dev/null
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
+#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
+
+#include "asm_support_x86.h"
+
+#if defined(__APPLE__)
+ // Mac OS' as(1) doesn't let you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name
+ #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
+ #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
+ #define END_MACRO .endmacro
+
+ // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
+ // are mangled with an extra underscore prefix. The use of $x for arguments
+ // mean that literals need to be represented with $$x in macros.
+ #define SYMBOL(name) _ ## name
+ #define VAR(name,index) SYMBOL($index)
+ #define REG_VAR(name,index) %$index
+ #define CALL_MACRO(name,index) $index
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $$value
+#else
+ // Regular gas(1) lets you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+ #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+ #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+ #define END_MACRO .endm
+
+ // Regular gas(1) uses \argument_name for macro arguments.
+ // We need to turn on alternate macro syntax so we can use & instead or the preprocessor
+ // will screw us by inserting a space between the \ and the name. Even in this mode there's
+ // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
+ // special character meaning care needs to be taken when passing registers as macro arguments.
+ .altmacro
+ #define SYMBOL(name) name
+ #define VAR(name,index) name&
+ #define REG_VAR(name,index) %name
+ #define CALL_MACRO(name,index) name&
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $value
+#endif
+
+ /* Cache alignment for function entry */
+MACRO0(ALIGN_FUNCTION_ENTRY)
+ .balign 16
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION, c_name)
+ .type VAR(c_name, 0), @function
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ .cfi_startproc
+END_MACRO
+
+MACRO1(END_FUNCTION, c_name)
+ .cfi_endproc
+ .size \c_name, .-\c_name
+END_MACRO
+
+MACRO1(PUSH, reg)
+ pushl REG_VAR(reg, 0)
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset REG_VAR(reg, 0), 0
+END_MACRO
+
+MACRO1(POP, reg)
+ popl REG_VAR(reg,0)
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore REG_VAR(reg,0)
+END_MACRO
+
+#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
new file mode 100644
index 0000000..1092910
--- /dev/null
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+
+#include "asm_support.h"
+
+// Offset of field Thread::self_ verified in InitCpu
+#define THREAD_SELF_OFFSET 40
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
+
+#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/arch/x86/quick_entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index cced916..d47dfef 100644
--- a/runtime/arch/x86/quick_entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -14,8 +14,9 @@
* limitations under the License.
*/
+#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
namespace art {
@@ -115,107 +116,109 @@ extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception_from_code();
extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-void InitEntryPoints(QuickEntryPoints* points) {
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
// Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
// Cast
- points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
+ qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
// DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
// Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
// FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
// JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
// Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
// Math
// points->pCmpgDouble = NULL; // Not needed on x86.
// points->pCmpgFloat = NULL; // Not needed on x86.
// points->pCmplDouble = NULL; // Not needed on x86.
// points->pCmplFloat = NULL; // Not needed on x86.
- points->pFmod = art_quick_fmod_from_code;
- points->pL2d = art_quick_l2d_from_code;
- points->pFmodf = art_quick_fmodf_from_code;
- points->pL2f = art_quick_l2f_from_code;
+ qpoints->pFmod = art_quick_fmod_from_code;
+ qpoints->pL2d = art_quick_l2d_from_code;
+ qpoints->pFmodf = art_quick_fmodf_from_code;
+ qpoints->pL2f = art_quick_l2f_from_code;
// points->pD2iz = NULL; // Not needed on x86.
// points->pF2iz = NULL; // Not needed on x86.
- points->pIdivmod = art_quick_idivmod_from_code;
- points->pD2l = art_quick_d2l_from_code;
- points->pF2l = art_quick_f2l_from_code;
- points->pLdiv = art_quick_ldiv_from_code;
- points->pLdivmod = art_quick_ldivmod_from_code;
- points->pLmul = art_quick_lmul_from_code;
- points->pShlLong = art_quick_lshl_from_code;
- points->pShrLong = art_quick_lshr_from_code;
- points->pUshrLong = art_quick_lushr_from_code;
+ qpoints->pIdivmod = art_quick_idivmod_from_code;
+ qpoints->pD2l = art_quick_d2l_from_code;
+ qpoints->pF2l = art_quick_f2l_from_code;
+ qpoints->pLdiv = art_quick_ldiv_from_code;
+ qpoints->pLdivmod = art_quick_ldivmod_from_code;
+ qpoints->pLmul = art_quick_lmul_from_code;
+ qpoints->pShlLong = art_quick_lshl_from_code;
+ qpoints->pShrLong = art_quick_lshr_from_code;
+ qpoints->pUshrLong = art_quick_lushr_from_code;
// Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
// Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = art_quick_memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = art_quick_memcpy;
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = art_quick_memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = art_quick_memcpy;
// Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
// Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
};
} // namespace art
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
new file mode 100644
index 0000000..e9c88fe
--- /dev/null
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+DEFINE_FUNCTION art_jni_dlsym_lookup_stub
+ subl LITERAL(8), %esp // align stack
+ .cfi_adjust_cfa_offset 8
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artFindNativeMethod) // (Thread*)
+ addl LITERAL(12), %esp // restore the stack
+ .cfi_adjust_cfa_offset -12
+ cmpl LITERAL(0), %eax // check if returned method code is null
+ je no_native_code_found // if null, jump to return to handle
+ jmp *%eax // otherwise, tail call to intended method
+no_native_code_found:
+ ret
+END_FUNCTION art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
new file mode 100644
index 0000000..a0fca6c
--- /dev/null
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * [sp] = return address
+ * [sp + 4] = method pointer
+ * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 12] = size of argument array in bytes
+ * [sp + 16] = (managed) thread pointer
+ * [sp + 20] = JValue* result
+ * [sp + 24] = result type char
+ */
+DEFINE_FUNCTION art_portable_invoke_stub
+ PUSH ebp // save ebp
+ PUSH ebx // save ebx
+ mov %esp, %ebp // copy value of stack pointer into base pointer
+ .cfi_def_cfa_register ebp
+ mov 20(%ebp), %ebx // get arg array size
+ addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
+ andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
+ subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ subl %ebx, %esp // reserve stack space for argument array
+ lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
+ pushl 20(%ebp) // push size of region to memcpy
+ pushl 16(%ebp) // push arg array as source of memcpy
+ pushl %eax // push stack pointer as destination of memcpy
+ call SYMBOL(memcpy) // (void*, const void*, size_t)
+ addl LITERAL(12), %esp // pop arguments to memcpy
+ mov 12(%ebp), %eax // move method pointer into eax
+ mov %eax, (%esp) // push method pointer onto stack
+ call *METHOD_CODE_OFFSET(%eax) // call the method
+ mov %ebp, %esp // restore stack pointer
+ POP ebx // pop ebx
+ POP ebp // pop ebp
+ mov 20(%esp), %ecx // get result pointer
+ cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
+ je return_double_portable
+ cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
+ je return_float_portable
+ mov %eax, (%ecx) // store the result
+ mov %edx, 4(%ecx) // store the other half of the result
+ ret
+return_double_portable:
+ fstpl (%ecx) // store the floating point result as double
+ ret
+return_float_portable:
+ fstps (%ecx) // store the floating point result as float
+ ret
+END_FUNCTION art_portable_invoke_stub
+
+DEFINE_FUNCTION art_portable_proxy_invoke_handler
+ // Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ // TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ PUSH edi // Save callee saves
+ PUSH esi
+ PUSH ebp
+ PUSH ebx // Save args
+ PUSH edx
+ PUSH ecx
+ PUSH eax // Align stack, eax will be clobbered by Method*
+ // Begin argument set up.
+ PUSH esp // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass receiver
+ PUSH eax // pass proxy method
+ call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movd %eax, %xmm0 // place return value also into floating point return value
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+ addl LITERAL(44), %esp // pop arguments
+ .cfi_adjust_cfa_offset -44
+ ret
+END_FUNCTION art_portable_proxy_invoke_handler
+
+ /*
+ * Portable abstract method error stub. method* is at %esp + 4 on entry.
+ */
+DEFINE_FUNCTION art_portable_abstract_method_error_stub
+ PUSH ebp
+ movl %esp, %ebp // Remember SP.
+ .cfi_def_cfa_register ebp
+ subl LITERAL(12), %esp // Align stack.
+ PUSH esp // Pass sp (not used).
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ pushl 8(%ebp) // Pass Method*.
+ call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
+ leave // Restore the stack and %ebp.
+ .cfi_def_cfa esp, 4
+ .cfi_restore ebp
+ ret // Return to caller to handle pending exception.
+END_FUNCTION art_portable_abstract_method_error_stub
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index ee6db0c..89ea71a 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -14,76 +14,7 @@
* limitations under the License.
*/
-#include "asm_support.h"
-
-#if defined(__APPLE__)
- // Mac OS' as(1) doesn't let you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name
- #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
- #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
- #define END_MACRO .endmacro
-
- // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
- // are mangled with an extra underscore prefix. The use of $x for arguments
- // mean that literals need to be represented with $$x in macros.
- #define SYMBOL(name) _ ## name
- #define VAR(name,index) SYMBOL($index)
- #define REG_VAR(name,index) %$index
- #define CALL_MACRO(name,index) $index
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $$value
-#else
- // Regular gas(1) lets you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
- #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
- #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
- #define END_MACRO .endm
-
- // Regular gas(1) uses \argument_name for macro arguments.
- // We need to turn on alternate macro syntax so we can use & instead or the preprocessor
- // will screw us by inserting a space between the \ and the name. Even in this mode there's
- // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
- // special character meaning care needs to be taken when passing registers as macro arguments.
- .altmacro
- #define SYMBOL(name) name
- #define VAR(name,index) name&
- #define REG_VAR(name,index) %name
- #define CALL_MACRO(name,index) name&
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $value
-#endif
-
- /* Cache alignment for function entry */
-MACRO0(ALIGN_FUNCTION_ENTRY)
- .balign 16
-END_MACRO
-
-MACRO1(DEFINE_FUNCTION, c_name)
- .type VAR(c_name, 0), @function
- .globl VAR(c_name, 0)
- ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
- .cfi_startproc
-END_MACRO
-
-MACRO1(END_FUNCTION, c_name)
- .cfi_endproc
- .size \c_name, .-\c_name
-END_MACRO
-
-MACRO1(PUSH, reg)
- pushl REG_VAR(reg, 0)
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset REG_VAR(reg, 0), 0
-END_MACRO
-
-MACRO1(POP, reg)
- popl REG_VAR(reg,0)
- .cfi_adjust_cfa_offset -4
- .cfi_restore REG_VAR(reg,0)
-END_MACRO
+#include "asm_support_x86.S"
/*
* Macro that sets up the callee save frame to conform with
@@ -302,55 +233,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvoke
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Portable invocation stub.
- * On entry:
- * [sp] = return address
- * [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
- * [sp + 12] = size of argument array in bytes
- * [sp + 16] = (managed) thread pointer
- * [sp + 20] = JValue* result
- * [sp + 24] = result type char
- */
-DEFINE_FUNCTION art_portable_invoke_stub
- PUSH ebp // save ebp
- PUSH ebx // save ebx
- mov %esp, %ebp // copy value of stack pointer into base pointer
- .cfi_def_cfa_register ebp
- mov 20(%ebp), %ebx // get arg array size
- addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
- andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
- subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
- subl %ebx, %esp // reserve stack space for argument array
- lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
- pushl 20(%ebp) // push size of region to memcpy
- pushl 16(%ebp) // push arg array as source of memcpy
- pushl %eax // push stack pointer as destination of memcpy
- call SYMBOL(memcpy) // (void*, const void*, size_t)
- addl LITERAL(12), %esp // pop arguments to memcpy
- mov 12(%ebp), %eax // move method pointer into eax
- mov %eax, (%esp) // push method pointer onto stack
- call *METHOD_CODE_OFFSET(%eax) // call the method
- mov %ebp, %esp // restore stack pointer
- POP ebx // pop ebx
- POP ebp // pop ebp
- mov 20(%esp), %ecx // get result pointer
- cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
- je return_double_portable
- cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
- je return_float_portable
- mov %eax, (%ecx) // store the result
- mov %edx, 4(%ecx) // store the other half of the result
- ret
-return_double_portable:
- fstpl (%ecx) // store the floating point result as double
- ret
-return_float_portable:
- fstps (%ecx) // store the floating point result as float
- ret
-END_FUNCTION art_portable_invoke_stub
-
- /*
* Quick invocation stub.
* On entry:
* [sp] = return address
@@ -920,22 +802,6 @@ DEFINE_FUNCTION art_quick_get_obj_static_from_code
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get_obj_static_from_code
-DEFINE_FUNCTION art_portable_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
- PUSH esp // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- PUSH ecx // pass receiver
- PUSH eax // pass proxy method
- call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
- movd %eax, %xmm0 // place return value also into floating point return value
- movd %edx, %xmm1
- punpckldq %xmm1, %xmm0
- addl LITERAL(44), %esp // pop arguments
- .cfi_adjust_cfa_offset -44
- ret
-END_FUNCTION art_portable_proxy_invoke_handler
-
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
PUSH esp // pass SP
@@ -1054,24 +920,6 @@ DEFINE_FUNCTION art_quick_deoptimize
END_FUNCTION art_quick_deoptimize
/*
- * Portable abstract method error stub. method* is at %esp + 4 on entry.
- */
-DEFINE_FUNCTION art_portable_abstract_method_error_stub
- PUSH ebp
- movl %esp, %ebp // Remember SP.
- .cfi_def_cfa_register ebp
- subl LITERAL(12), %esp // Align stack.
- PUSH esp // Pass sp (not used).
- pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- pushl 8(%ebp) // Pass Method*.
- call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
- leave // Restore the stack and %ebp.
- .cfi_def_cfa esp, 4
- .cfi_restore ebp
- ret // Return to caller to handle pending exception.
-END_FUNCTION art_portable_abstract_method_error_stub
-
- /*
* Quick abstract method error stub. %eax contains method* on entry.
*/
DEFINE_FUNCTION art_quick_abstract_method_error_stub
@@ -1087,24 +935,6 @@ DEFINE_FUNCTION art_quick_abstract_method_error_stub
END_FUNCTION art_quick_abstract_method_error_stub
/*
- * Portable resolution trampoline.
- */
-DEFINE_FUNCTION art_jni_dlsym_lookup_stub
- subl LITERAL(8), %esp // align stack
- .cfi_adjust_cfa_offset 8
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- call SYMBOL(artFindNativeMethod) // (Thread*)
- addl LITERAL(12), %esp // restore the stack
- .cfi_adjust_cfa_offset -12
- cmpl LITERAL(0), %eax // check if returned method code is null
- je no_native_code_found // if null, jump to return to handle
- jmp *%eax // otherwise, tail call to intended method
-no_native_code_found:
- ret
-END_FUNCTION art_jni_dlsym_lookup_stub
-
- /*
* String's indexOf.
*
* On entry:
diff --git a/runtime/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index c398b28..dd3e7dd 100644
--- a/runtime/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -19,7 +19,7 @@
#include <sys/syscall.h>
#include <sys/types.h>
-#include "asm_support.h"
+#include "asm_support_x86.h"
#include "base/macros.h"
#include "thread.h"
#include "thread_list.h"
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 7b20c7a..aca93a5 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -30,29 +30,4 @@
// Offset of field Method::entry_point_from_compiled_code_
#define METHOD_CODE_OFFSET 40
-#if defined(__arm__)
-// Register holding suspend check count down.
-#define rSUSPEND r4
-// Register holding Thread::Current().
-#define rSELF r9
-// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#elif defined(__mips__)
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#elif defined(__i386__)
-// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 40
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#endif
-
#endif // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 72e0f48..84f186d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -58,10 +58,7 @@
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
-#include "runtime_support.h"
-#if defined(ART_USE_PORTABLE_COMPILER)
-#include "runtime_support_llvm.h"
-#endif
+#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 75886cf..4659fd1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -22,6 +22,7 @@
#include "class_linker-inl.h"
#include "common_test.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -32,7 +33,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
#include "mirror/stack_trace_element.h"
-#include "runtime_support.h"
#include "sirt_ref.h"
using ::art::mirror::AbstractMethod;
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 2c23340..7ee6fe2 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -31,6 +31,7 @@
#include "class_linker.h"
#include "compiler/driver/compiler_driver.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
#include "instruction_set.h"
@@ -39,7 +40,6 @@
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
diff --git a/runtime/runtime_support.cc b/runtime/entrypoints/entrypoint_utils.cc
index d28aad1..c297841 100644
--- a/runtime/runtime_support.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -30,74 +30,6 @@
#include "ScopedLocalRef.h"
#include "well_known_classes.h"
-double art_l2d(int64_t l) {
- return static_cast<double>(l);
-}
-
-float art_l2f(int64_t l) {
- return static_cast<float>(l);
-}
-
-/*
- * Float/double conversion requires clamping to min and max of integer form. If
- * target doesn't support this normally, use these.
- */
-int64_t art_d2l(double d) {
- static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
- if (d >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (d <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(d);
- }
-}
-
-int64_t art_f2l(float f) {
- static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
- if (f >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (f <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(f);
- }
-}
-
-int32_t art_d2i(double d) {
- static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
- static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
- if (d >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (d <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(d);
- }
-}
-
-int32_t art_f2i(float f) {
- static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
- static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
- if (f >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (f <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(f);
- }
-}
-
namespace art {
// Helper function to allocate array for FILLED_NEW_ARRAY.
diff --git a/runtime/runtime_support.h b/runtime/entrypoints/entrypoint_utils.h
index 43c6784..3f28b5e 100644
--- a/runtime/runtime_support.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_RUNTIME_SUPPORT_H_
-#define ART_RUNTIME_RUNTIME_SUPPORT_H_
+#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
+#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
#include "class_linker.h"
#include "common_throws.h"
@@ -42,13 +42,6 @@ extern "C" void art_quick_interpreter_entry(void*);
extern "C" void art_quick_proxy_invoke_handler();
extern "C" void art_work_around_app_jni_bugs();
-extern "C" double art_l2d(int64_t l);
-extern "C" float art_l2f(int64_t l);
-extern "C" int64_t art_d2l(double d);
-extern "C" int32_t art_d2i(double d);
-extern "C" int64_t art_f2l(float f);
-extern "C" int32_t art_f2i(float f);
-
namespace art {
namespace mirror {
class Class;
@@ -416,4 +409,4 @@ static inline void* GetJniDlsymLookupStub() {
} // namespace art
-#endif // ART_RUNTIME_RUNTIME_SUPPORT_H_
+#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
new file mode 100644
index 0000000..98f7b12
--- /dev/null
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "mirror/abstract_method.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+extern "C" void* artFindNativeMethod(Thread* self) {
+ Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
+ DCHECK(Thread::Current() == self);
+ ScopedObjectAccess soa(self);
+
+ mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
+ DCHECK(method != NULL);
+
+ // Lookup symbol address for method, on failure we'll return NULL with an
+ // exception set, otherwise we return the address of the method we found.
+ void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
+ if (native_code == NULL) {
+ DCHECK(self->IsExceptionPending());
+ return NULL;
+ } else {
+    // Register the resolved code as this method's native implementation so future
+ method->RegisterNative(self, native_code);
+ return native_code;
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc
new file mode 100644
index 0000000..31d13c8
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+namespace art {
+
+extern "C" double art_l2d(int64_t l) {
+ return static_cast<double>(l);
+}
+
+extern "C" float art_l2f(int64_t l) {
+ return static_cast<float>(l);
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+extern "C" int64_t art_d2l(double d) {
+ static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (d >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (d <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(d);
+ }
+}
+
+extern "C" int64_t art_f2l(float f) {
+ static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (f >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (f <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(f);
+ }
+}
+
+extern "C" int32_t art_d2i(double d) {
+ static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
+ static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
+ if (d >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (d <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(d);
+ }
+}
+
+extern "C" int32_t art_f2i(float f) {
+ static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
+ static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
+ if (f >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (f <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(f);
+ }
+}
+
+} // namespace art
diff --git a/runtime/runtime_support_llvm.h b/runtime/entrypoints/math_entrypoints.h
index 43ea953..717c734 100644
--- a/runtime/runtime_support_llvm.h
+++ b/runtime/entrypoints/math_entrypoints.h
@@ -14,14 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
-#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
+#ifndef ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
-extern "C" {
-//----------------------------------------------------------------------------
-// Runtime Support Function Lookup Callback
-//----------------------------------------------------------------------------
-void* art_portable_find_runtime_support_func(void* context, const char* name);
-} // extern "C"
+#include <stdint.h>
-#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
+extern "C" double art_l2d(int64_t l);
+extern "C" float art_l2f(int64_t l);
+extern "C" int64_t art_d2l(double d);
+extern "C" int32_t art_d2i(double d);
+extern "C" int64_t art_f2l(float f);
+extern "C" int32_t art_f2i(float f);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
diff --git a/runtime/runtime_support_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
index b827813..ca8b931 100644
--- a/runtime/runtime_support_test.cc
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#include "runtime_support.h"
+#include "math_entrypoints.h"
#include "common_test.h"
#include <limits>
namespace art {
-class RuntimeSupportTest : public CommonTest {};
+class MathEntrypointsTest : public CommonTest {};
-TEST_F(RuntimeSupportTest, DoubleToLong) {
+TEST_F(MathEntrypointsTest, DoubleToLong) {
EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
EXPECT_EQ(0LL, art_d2l(0));
@@ -35,7 +35,7 @@ TEST_F(RuntimeSupportTest, DoubleToLong) {
EXPECT_EQ(-100LL, art_d2l(-100.0));
}
-TEST_F(RuntimeSupportTest, FloatToLong) {
+TEST_F(MathEntrypointsTest, FloatToLong) {
EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
EXPECT_EQ(0LL, art_f2l(0));
@@ -47,7 +47,7 @@ TEST_F(RuntimeSupportTest, FloatToLong) {
EXPECT_EQ(-100LL, art_f2l(-100.0));
}
-TEST_F(RuntimeSupportTest, DoubleToInt) {
+TEST_F(MathEntrypointsTest, DoubleToInt) {
EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
EXPECT_EQ(0L, art_d2i(0));
@@ -59,7 +59,7 @@ TEST_F(RuntimeSupportTest, DoubleToInt) {
EXPECT_EQ(-100L, art_d2i(-100.0));
}
-TEST_F(RuntimeSupportTest, FloatToInt) {
+TEST_F(MathEntrypointsTest, FloatToInt) {
EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
EXPECT_EQ(0L, art_f2i(0));
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
new file mode 100644
index 0000000..2869269
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectFromCode(type_idx, referrer, thread, false);
+}
+
+extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectFromCode(type_idx, referrer, thread, true);
+}
+
+extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocArrayFromCode(type_idx, referrer, length, self, false);
+}
+
+extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocArrayFromCode(type_idx, referrer, length, self, true);
+}
+
+extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
+}
+
+extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_argument_visitor.h b/runtime/entrypoints/portable/portable_argument_visitor.h
new file mode 100644
index 0000000..f268baf
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_argument_visitor.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
+#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
+
+#include "object_utils.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class PortableArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define PORTABLE_STACK_ARG_SKIP 0
+#elif defined(__mips__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define PORTABLE_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define PORTABLE_STACK_ARG_SKIP 4
+#else
+#error "Unsupported architecture"
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define PORTABLE_STACK_ARG_SKIP 0
+#endif
+
+ PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ caller_mh_(caller_mh),
+ args_in_regs_(ComputeArgsInRegs(caller_mh)),
+ num_params_(caller_mh.NumArgs()),
+ reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ + PORTABLE_STACK_ARG_SKIP),
+ cur_args_(reg_args_),
+ cur_arg_index_(0),
+ param_index_(0) {
+ }
+
+ virtual ~PortableArgumentVisitor() {}
+
+ virtual void Visit() = 0;
+
+ bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamAReference(param_index_);
+ }
+
+ bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamALongOrDouble(param_index_);
+ }
+
+ Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.GetParamPrimitiveType(param_index_);
+ }
+
+ byte* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * kPointerSize);
+ }
+
+ void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
+ break;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ cur_args_ = stack_args_;
+ cur_arg_index_ = 0;
+ while (param_index_ < num_params_) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
+ cur_arg_index_++;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ }
+
+ private:
+ static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if (defined(__i386__))
+ return 0;
+#else
+ size_t args_in_regs = 0;
+ size_t num_params = mh.NumArgs();
+ for (size_t i = 0; i < num_params; i++) {
+ args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+ return args_in_regs;
+#endif
+ }
+ MethodHelper& caller_mh_;
+ const size_t args_in_regs_;
+ const size_t num_params_;
+ byte* const reg_args_;
+ byte* const stack_args_;
+ byte* cur_args_;
+ size_t cur_arg_index_;
+ size_t param_index_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
new file mode 100644
index 0000000..d343c5d
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_throws.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type,
+ const mirror::Class* src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(dest_type != NULL);
+ DCHECK(src_type != NULL);
+ return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
+}
+
+extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type,
+ const mirror::Class* src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
+ DCHECK(src_type->IsClass()) << PrettyClass(src_type);
+ if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) {
+ ThrowClassCastException(dest_type, src_type);
+ }
+}
+
+extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element,
+ const mirror::Object* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (element == NULL) {
+ return;
+ }
+ DCHECK(array != NULL);
+ mirror::Class* array_class = array->GetClass();
+ DCHECK(array_class != NULL);
+ mirror::Class* component_type = array_class->GetComponentType();
+ mirror::Class* element_class = element->GetClass();
+ if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
+ ThrowArrayStoreException(element_class, array_class);
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
new file mode 100644
index 0000000..bdab587
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
+}
+
+extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
+}
+
+extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Called when caller isn't guaranteed to have access to a type and the dex cache may be
+ // unpopulated
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
+}
+
+extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer,
+ uint32_t string_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveStringFromCode(referrer, string_idx);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
new file mode 100644
index 0000000..a229c76
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+
+#include "dex_file-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+ class AbstractMethod;
+ class Object;
+} // namespace mirror
+class Thread;
+
+#define PORTABLE_ENTRYPOINT_OFFSET(x) \
+ (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
+ static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
+
+// Pointers to functions that are called by code generated by compiler's adhering to the portable
+// compiler ABI.
+struct PACKED(4) PortableEntryPoints {
+ // Invocation
+ const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc
new file mode 100644
index 0000000..aa0f03c
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ int32_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx,
+ referrer,
+ StaticPrimitiveWrite,
+ sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx,
+ referrer,
+ Thread::Current(),
+ StaticPrimitiveWrite,
+ sizeof(uint32_t),
+ true);
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ int64_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx,
+ referrer,
+ Thread::Current(),
+ StaticPrimitiveWrite,
+ sizeof(uint64_t),
+ true);
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticObjectWrite, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticPrimitiveRead, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticPrimitiveRead, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj, uint32_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ field->Set32(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveWrite, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set32(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj, int64_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ field->Set64(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveWrite, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set64(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj,
+ mirror::Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ field->SetObj(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstanceObjectWrite, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ field->SetObj(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get32(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveRead, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get32(obj);
+ }
+ return 0;
+}
+
+extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get64(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveRead, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get64(obj);
+ }
+ return 0;
+}
+
+extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstanceObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(obj);
+ }
+ return 0;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
new file mode 100644
index 0000000..771608b
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method,
+ uint32_t dex_pc,
+ mirror::Array* array,
+ uint32_t payload_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = MethodHelper(method).GetCodeItem();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
+ DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ return; // Error
+ }
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count - 1);
+ return; // Error
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
new file mode 100644
index 0000000..5911ba3
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ bool access_check,
+ InvokeType type,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* method = FindMethodFast(method_idx,
+ this_object,
+ caller_method,
+ access_check,
+ type);
+ if (UNLIKELY(method == NULL)) {
+ method = FindMethodFromCode(method_idx, this_object, caller_method,
+ thread, access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(thread->IsExceptionPending());
+ return 0; // failure
+ }
+ }
+ DCHECK(!thread->IsExceptionPending());
+ const void* code = method->GetEntryPointFromCompiledCode();
+
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ if (UNLIKELY(code == NULL)) {
+ MethodHelper mh(method);
+ LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
+ << " location: " << mh.GetDexFile().GetLocation();
+ }
+ return method;
+}
+
+extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc
new file mode 100644
index 0000000..8df16ae
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
+extern "C" uint32_t art_portable_jni_method_start(Thread* self)
+    UNLOCK_FUNCTION(Locks::mutator_lock_) {
+ JNIEnvExt* env = self->GetJniEnv();
+ uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ self->TransitionFromRunnableToSuspended(kNative);
+ return saved_local_ref_cookie;
+}
+
+extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self)
+ UNLOCK_FUNCTION(Locks::mutator_lock_) {
+ self->DecodeJObject(to_lock)->MonitorEnter(self);
+ return art_portable_jni_method_start(self);
+}
+
+static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
+ JNIEnvExt* env = self->GetJniEnv();
+ env->locals.SetSegmentState(env->local_ref_cookie);
+ env->local_ref_cookie = saved_local_ref_cookie;
+}
+
+extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+
+extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result,
+ uint32_t saved_local_ref_cookie,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ mirror::Object* o = self->DecodeJObject(result);
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc
new file mode 100644
index 0000000..44d3da9
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread)
+ EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
+ DCHECK(obj != NULL); // Assumed to have been checked before entry.
+ obj->MonitorEnter(thread); // May block.
+ DCHECK(thread->HoldsLock(obj));
+ // Only possible exception is NPE and is handled before entry.
+ DCHECK(!thread->IsExceptionPending());
+}
+
+extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread)
+ UNLOCK_FUNCTION(monitor_lock_) {
+ DCHECK(obj != NULL); // Assumed to have been checked before entry.
+ // MonitorExit may throw exception.
+ obj->MonitorExit(thread);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_proxy_entrypoints.cc b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc
new file mode 100644
index 0000000..3db39cd
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "portable_argument_visitor.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
+// to jobjects.
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
+ public:
+ BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
+ PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+ val.l = soa_.AddLocalReference<jobject>(obj);
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_.push_back(val);
+ }
+
+ private:
+ ScopedObjectAccessUnchecked& soa_;
+ std::vector<jvalue>& args_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
+// which is responsible for recording callee save registers. We explicitly place into jobjects the
+// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
+// field within the proxy object, which will box the primitive arguments and deal with error cases.
+extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+ mirror::Object* receiver,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+ const char* old_cause =
+ self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+ self->VerifyStack();
+ // Start new JNI local reference state.
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ // Create local ref. copies of proxy method and the receiver.
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+ // Placing arguments into args vector and remove the receiver.
+ MethodHelper proxy_mh(proxy_method);
+ std::vector<jvalue> args;
+ BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
+ local_ref_visitor.VisitArguments();
+ args.erase(args.begin());
+
+ // Convert proxy method into expected interface method.
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ DCHECK(interface_method != NULL);
+ DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+ // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
+ // that performs allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+ rcvr_jobj, interface_method_jobj, args);
+ return result.GetJ();
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_stub_entrypoints.cc b/runtime/entrypoints/portable/portable_stub_entrypoints.cc
new file mode 100644
index 0000000..c510c65
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_stub_entrypoints.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+// Lazily resolve a method for portable. Called by stub code.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** called_addr,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t dex_pc;
+ mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc);
+
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ InvokeType invoke_type;
+ bool is_range;
+ if (called->IsRuntimeMethod()) {
+ const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ switch (instr_code) {
+ case Instruction::INVOKE_DIRECT:
+ invoke_type = kDirect;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_STATIC:
+ invoke_type = kStatic;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ invoke_type = kSuper;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ invoke_type = kVirtual;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+ // Avoid used uninitialized warnings.
+ invoke_type = kDirect;
+ is_range = true;
+ }
+ uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+ called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
+ } else {
+ CHECK(called->IsStatic()) << PrettyMethod(called);
+ invoke_type = kStatic;
+ }
+ const void* code = NULL;
+ if (LIKELY(!thread->IsExceptionPending())) {
+ // Incompatible class change should have been handled in resolve method.
+ CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+ // Ensure that the called method's class is initialized.
+ mirror::Class* called_class = called->GetDeclaringClass();
+ linker->EnsureInitialized(called_class, true, true);
+ if (LIKELY(called_class->IsInitialized())) {
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+ { // for lazy link.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ } else if (called_class->IsInitializing()) {
+ if (invoke_type == kStatic) {
+ // Class is still initializing, go to oat and grab code (trampoline must be left in place
+ // until class is initialized to stop races between threads).
+ code = linker->GetOatCodeFor(called);
+ } else {
+ // No trampoline for non-static methods.
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+ { // for lazy link.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ }
+ } else {
+ DCHECK(called_class->IsErroneous());
+ }
+ }
+ if (LIKELY(code != NULL)) {
+ // Expect class to at least be initializing.
+ DCHECK(called->GetDeclaringClass()->IsInitializing());
+ // Don't want infinite recursion.
+ DCHECK(code != GetResolutionTrampoline(linker));
+ // Set up entry into main method
+ *called_addr = called;
+ }
+ return code;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
new file mode 100644
index 0000000..dac7388
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method.h"
+#include "mirror/object-inl.h"
+#include "verifier/dex_gc_map.h"
+#include "stack.h"
+
+namespace art {
+
+class ShadowFrameCopyVisitor : public StackVisitor {
+ public:
+ explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
+ top_frame_(NULL) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (IsShadowFrame()) {
+ ShadowFrame* cur_frame = GetCurrentShadowFrame();
+ size_t num_regs = cur_frame->NumberOfVRegs();
+ mirror::AbstractMethod* method = cur_frame->GetMethod();
+ uint32_t dex_pc = cur_frame->GetDexPC();
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
+
+ const uint8_t* gc_map = method->GetNativeGcMap();
+ uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
+ (gc_map[1] << 16) |
+ (gc_map[2] << 8) |
+ (gc_map[3] << 0));
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+ const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ if (TestBitmap(reg, reg_bitmap)) {
+ new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg));
+ } else {
+ new_frame->SetVReg(reg, cur_frame->GetVReg(reg));
+ }
+ }
+
+ if (prev_frame_ != NULL) {
+ prev_frame_->SetLink(new_frame);
+ } else {
+ top_frame_ = new_frame;
+ }
+ prev_frame_ = new_frame;
+ }
+ return true;
+ }
+
+ ShadowFrame* GetShadowFrameCopy() {
+ return top_frame_;
+ }
+
+ private:
+ static bool TestBitmap(int reg, const uint8_t* reg_vector) {
+ return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
+ }
+
+ ShadowFrame* prev_frame_;
+ ShadowFrame* top_frame_;
+};
+
+extern "C" void art_portable_test_suspend_from_code(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckSuspend(self);
+ if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
+ // Save out the shadow frame to the heap
+ ShadowFrameCopyVisitor visitor(self);
+ visitor.WalkStack(true);
+ self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
+ self->SetDeoptimizationReturnValue(JValue());
+ self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ }
+}
+
+extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
+ ShadowFrame* new_shadow_frame,
+ mirror::AbstractMethod* method,
+ uint32_t num_vregs) {
+ ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
+ new_shadow_frame->SetMethod(method);
+ new_shadow_frame->SetNumberOfVRegs(num_vregs);
+ return old_frame;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
new file mode 100644
index 0000000..4b2b46b
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowArithmeticExceptionDivideByZero();
+}
+
+extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowArrayIndexOutOfBoundsException(index, length);
+}
+
+extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowNoSuchMethodError(method_idx);
+}
+
+extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // TODO: remove dex_pc argument from caller.
+ UNUSED(dex_pc);
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionFromDexPC(throw_location);
+}
+
+extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowStackOverflowError(Thread::Current());
+}
+
+extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ if (exception == NULL) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else {
+ self->SetException(throw_location, exception);
+ }
+}
+
+extern "C" void* art_portable_get_and_clear_exception(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(self->IsExceptionPending());
+ // TODO: make this inline.
+ mirror::Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ return exception;
+}
+
+extern "C" int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method,
+ uint32_t ti_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current(); // TODO: make an argument.
+ ThrowLocation throw_location;
+ mirror::Throwable* exception = self->GetException(&throw_location);
+ // Check for special deoptimization exception.
+ if (UNLIKELY(reinterpret_cast<int32_t>(exception) == -1)) {
+ return -1;
+ }
+ mirror::Class* exception_type = exception->GetClass();
+ MethodHelper mh(current_method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ DCHECK_LT(ti_offset, code_item->tries_size_);
+ const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset);
+
+ int iter_index = 0;
+ int result = -1;
+ uint32_t catch_dex_pc = -1;
+ // Iterate over the catch handlers associated with dex_pc
+ for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) {
+ uint16_t iter_type_idx = it.GetHandlerTypeIndex();
+ // Catch all case
+ if (iter_type_idx == DexFile::kDexNoIndex16) {
+ catch_dex_pc = it.GetHandlerAddress();
+ result = iter_index;
+ break;
+ }
+ // Does this catch exception type apply?
+ mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
+ if (UNLIKELY(iter_exception_type == NULL)) {
+ // TODO: check, the verifier (class linker?) should take care of resolving all exception
+ // classes early.
+ LOG(WARNING) << "Unresolved exception class when finding catch block: "
+ << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
+ } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
+ catch_dex_pc = it.GetHandlerAddress();
+ result = iter_index;
+ break;
+ }
+ ++iter_index;
+ }
+ if (result != -1) {
+ // Handler found.
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self,
+ throw_location,
+ current_method,
+ catch_dex_pc,
+ exception);
+ }
+ return result;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index f66fc84..9ed802a 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -15,11 +15,11 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h
index 4f81151..35fa972 100644
--- a/runtime/entrypoints/quick/quick_argument_visitor.h
+++ b/runtime/entrypoints/quick/quick_argument_visitor.h
@@ -22,116 +22,6 @@
namespace art {
// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class PortableArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define PORTABLE_STACK_ARG_SKIP 0
-#elif defined(__mips__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define PORTABLE_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
-#define PORTABLE_STACK_ARG_SKIP 4
-#else
-#error "Unsupported architecture"
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define PORTABLE_STACK_ARG_SKIP 0
-#endif
-
- PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- caller_mh_(caller_mh),
- args_in_regs_(ComputeArgsInRegs(caller_mh)),
- num_params_(caller_mh.NumArgs()),
- reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
- stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
- + PORTABLE_STACK_ARG_SKIP),
- cur_args_(reg_args_),
- cur_arg_index_(0),
- param_index_(0) {
- }
-
- virtual ~PortableArgumentVisitor() {}
-
- virtual void Visit() = 0;
-
- bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamAReference(param_index_);
- }
-
- bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamALongOrDouble(param_index_);
- }
-
- Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.GetParamPrimitiveType(param_index_);
- }
-
- byte* GetParamAddress() const {
- return cur_args_ + (cur_arg_index_ * kPointerSize);
- }
-
- void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
- break;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- cur_args_ = stack_args_;
- cur_arg_index_ = 0;
- while (param_index_ < num_params_) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
- cur_arg_index_++;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- }
-
- private:
- static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if (defined(__i386__))
- return 0;
-#else
- size_t args_in_regs = 0;
- size_t num_params = mh.NumArgs();
- for (size_t i = 0; i < num_params; i++) {
- args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
- if (args_in_regs > 3) {
- args_in_regs = 3;
- break;
- }
- }
- return args_in_regs;
-#endif
- }
- MethodHelper& caller_mh_;
- const size_t args_in_regs_;
- const size_t num_params_;
- byte* const reg_args_;
- byte* const stack_args_;
- byte* cur_args_;
- size_t cur_arg_index_;
- size_t param_index_;
-};
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
public:
// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index fe91e61..b810bb7 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -15,10 +15,10 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 0af7a62..6400161 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -15,13 +15,13 @@
*/
#include "callee_save_frame.h"
-#include "gc/accounting/card_table-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
+#include "gc/accounting/card_table-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 8692e92..74b8cfd 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -20,15 +20,15 @@
#include "dex_file-inl.h"
#include "runtime.h"
-#define ENTRYPOINT_OFFSET(x) \
- (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, entrypoints_)) + \
+#define QUICK_ENTRYPOINT_OFFSET(x) \
+ (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
namespace art {
namespace mirror {
-class AbstractMethod;
-class Class;
-class Object;
+ class AbstractMethod;
+ class Class;
+ class Object;
} // namespace mirror
class DvmDex;
class MethodHelper;
@@ -123,8 +123,6 @@ struct PACKED(4) QuickEntryPoints {
void* (*pMemcpy)(void*, const void*, size_t);
// Invocation
- const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
- mirror::AbstractMethod**, Thread*);
const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
mirror::AbstractMethod**, Thread*);
void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
@@ -167,9 +165,6 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
jobject locked, Thread* self)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
-// Initialize an entry point data structure, architecture specific.
-void InitEntryPoints(QuickEntryPoints* points);
-
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index c20326c..a4e9dc9 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -16,10 +16,10 @@
#include "callee_save_frame.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
-#include "runtime_support.h"
#include <stdint.h>
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index a0b06fb..b81ad12 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -52,7 +52,7 @@ extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array,
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
"failed FILL_ARRAY_DATA; length=%d, index=%d",
- array->GetLength(), payload->element_count);
+ array->GetLength(), payload->element_count - 1);
return -1; // Error
}
uint32_t size_in_bytes = payload->element_count * payload->element_width;
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index 6a95f3c..53b3628 100644
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -16,12 +16,12 @@
#include "callee_save_frame.h"
#include "dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 2d31160..23a28f9 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,13 +15,13 @@
*/
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
index e4ef45f..4e3d749 100644
--- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
@@ -16,12 +16,12 @@
#include "quick_argument_visitor.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "reflection.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -32,50 +32,6 @@ namespace art {
// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
// to jobjects.
-class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
- public:
- BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
- ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
- PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
- virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- jvalue val;
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimNot: {
- mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
- val.l = soa_.AddLocalReference<jobject>(obj);
- break;
- }
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- val.j = *reinterpret_cast<jlong*>(GetParamAddress());
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- val.i = *reinterpret_cast<jint*>(GetParamAddress());
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args_.push_back(val);
- }
-
- private:
- ScopedObjectAccessUnchecked& soa_;
- std::vector<jvalue>& args_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
-};
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
public:
BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
@@ -126,46 +82,6 @@ class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
- mirror::Object* receiver,
- Thread* self, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- self->VerifyStack();
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
- // Create local ref. copies of proxy method and the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Placing arguments into args vector and remove the receiver.
- MethodHelper proxy_mh(proxy_method);
- std::vector<jvalue> args;
- BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
- local_ref_visitor.VisitArguments();
- args.erase(args.begin());
-
- // Convert proxy method into expected interface method.
- mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
- // that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
- JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
- rcvr_jobj, interface_method_jobj, args);
- return result.GetJ();
-}
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
mirror::Object* receiver,
Thread* self, mirror::AbstractMethod** sp)
diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
index f2af6d2..d78bbf3 100644
--- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
@@ -30,127 +30,6 @@ extern "C" void art_quick_deliver_exception_from_code(void*);
namespace art {
-// Lazily resolve a method for portable. Called by stub code.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** called_addr,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t dex_pc;
- mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc);
-
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- InvokeType invoke_type;
- bool is_range;
- if (called->IsRuntimeMethod()) {
- const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
- CHECK_LT(dex_pc, code->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- switch (instr_code) {
- case Instruction::INVOKE_DIRECT:
- invoke_type = kDirect;
- is_range = false;
- break;
- case Instruction::INVOKE_DIRECT_RANGE:
- invoke_type = kDirect;
- is_range = true;
- break;
- case Instruction::INVOKE_STATIC:
- invoke_type = kStatic;
- is_range = false;
- break;
- case Instruction::INVOKE_STATIC_RANGE:
- invoke_type = kStatic;
- is_range = true;
- break;
- case Instruction::INVOKE_SUPER:
- invoke_type = kSuper;
- is_range = false;
- break;
- case Instruction::INVOKE_SUPER_RANGE:
- invoke_type = kSuper;
- is_range = true;
- break;
- case Instruction::INVOKE_VIRTUAL:
- invoke_type = kVirtual;
- is_range = false;
- break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
- invoke_type = kVirtual;
- is_range = true;
- break;
- case Instruction::INVOKE_INTERFACE:
- invoke_type = kInterface;
- is_range = false;
- break;
- case Instruction::INVOKE_INTERFACE_RANGE:
- invoke_type = kInterface;
- is_range = true;
- break;
- default:
- LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
- // Avoid used uninitialized warnings.
- invoke_type = kDirect;
- is_range = true;
- }
- uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
- called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
- // Refine called method based on receiver.
- if (invoke_type == kVirtual) {
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
- } else if (invoke_type == kInterface) {
- called = receiver->GetClass()->FindVirtualMethodForInterface(called);
- }
- } else {
- CHECK(called->IsStatic()) << PrettyMethod(called);
- invoke_type = kStatic;
- }
- const void* code = NULL;
- if (LIKELY(!thread->IsExceptionPending())) {
- // Incompatible class change should have been handled in resolve method.
- CHECK(!called->CheckIncompatibleClassChange(invoke_type));
- // Ensure that the called method's class is initialized.
- mirror::Class* called_class = called->GetDeclaringClass();
- linker->EnsureInitialized(called_class, true, true);
- if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromCompiledCode();
- // TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
- }
- } else if (called_class->IsInitializing()) {
- if (invoke_type == kStatic) {
- // Class is still initializing, go to oat and grab code (trampoline must be left in place
- // until class is initialized to stop races between threads).
- code = linker->GetOatCodeFor(called);
- } else {
- // No trampoline for non-static methods.
- code = called->GetEntryPointFromCompiledCode();
- // TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
- }
- }
- } else {
- DCHECK(called_class->IsErroneous());
- }
- }
- if (LIKELY(code != NULL)) {
- // Expect class to at least be initializing.
- DCHECK(called->GetDeclaringClass()->IsInitializing());
- // Don't want infinite recursion.
- DCHECK(code != GetResolutionTrampoline(linker));
- // Set up entry into main method
- *called_addr = called;
- }
- return code;
-}
-
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
mirror::Object* receiver,
@@ -413,26 +292,4 @@ extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* meth
self->QuickDeliverException();
}
-// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-extern "C" void* artFindNativeMethod(Thread* self) {
- Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
- DCHECK(Thread::Current() == self);
- ScopedObjectAccess soa(self);
-
- mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
- DCHECK(method != NULL);
-
- // Lookup symbol address for method, on failure we'll return NULL with an
- // exception set, otherwise we return the address of the method we found.
- void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
- if (native_code == NULL) {
- DCHECK(self->IsExceptionPending());
- return NULL;
- } else {
- // Register so that future calls don't come here
- method->RegisterNative(self, native_code);
- return native_code;
- }
-}
-
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index e711714..b4d6c0b 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -15,7 +15,7 @@
*/
#include "callee_save_frame.h"
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
#include "thread.h"
#include "thread_list.h"
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9588698..3bfa2f2 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -15,9 +15,9 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/object.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "thread.h"
#include "well_known_classes.h"
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 37c45fa..ef4b95c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -24,6 +24,7 @@
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "invoke_arg_array_builder.h"
#include "nth_caller_visitor.h"
@@ -35,7 +36,6 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h
index 2df1367..d235e3e 100644
--- a/runtime/mirror/abstract_method-inl.h
+++ b/runtime/mirror/abstract_method-inl.h
@@ -20,9 +20,9 @@
#include "abstract_method.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "object_array.h"
#include "runtime.h"
-#include "runtime_support.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 53a1df9..540ff9f 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -26,6 +26,7 @@
#include "class_linker-inl.h"
#include "common_test.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "field-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -33,7 +34,6 @@
#include "abstract_method-inl.h"
#include "object-inl.h"
#include "object_array-inl.h"
-#include "runtime_support.h"
#include "sirt_ref.h"
#include "UniquePtr.h"
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
deleted file mode 100644
index 93396d6..0000000
--- a/runtime/runtime_support_llvm.cc
+++ /dev/null
@@ -1,930 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support_llvm.h"
-
-#include "ScopedLocalRef.h"
-#include "asm_support.h"
-#include "class_linker.h"
-#include "class_linker-inl.h"
-#include "dex_file-inl.h"
-#include "dex_instruction.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache-inl.h"
-#include "mirror/field-inl.h"
-#include "mirror/object.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "nth_caller_visitor.h"
-#include "object_utils.h"
-#include "reflection.h"
-#include "runtime_support.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "thread_list.h"
-#include "verifier/dex_gc_map.h"
-#include "verifier/method_verifier.h"
-#include "well_known_classes.h"
-
-#include <algorithm>
-#include <math.h>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-namespace art {
-
-using ::art::mirror::AbstractMethod;
-
-class ShadowFrameCopyVisitor : public StackVisitor {
- public:
- explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
- top_frame_(NULL) {}
-
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (IsShadowFrame()) {
- ShadowFrame* cur_frame = GetCurrentShadowFrame();
- size_t num_regs = cur_frame->NumberOfVRegs();
- AbstractMethod* method = cur_frame->GetMethod();
- uint32_t dex_pc = cur_frame->GetDexPC();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
-
- const uint8_t* gc_map = method->GetNativeGcMap();
- uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
- (gc_map[1] << 16) |
- (gc_map[2] << 8) |
- (gc_map[3] << 0));
- verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- if (TestBitmap(reg, reg_bitmap)) {
- new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg));
- } else {
- new_frame->SetVReg(reg, cur_frame->GetVReg(reg));
- }
- }
-
- if (prev_frame_ != NULL) {
- prev_frame_->SetLink(new_frame);
- } else {
- top_frame_ = new_frame;
- }
- prev_frame_ = new_frame;
- }
- return true;
- }
-
- ShadowFrame* GetShadowFrameCopy() {
- return top_frame_;
- }
-
- private:
- static bool TestBitmap(int reg, const uint8_t* reg_vector) {
- return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
- }
-
- ShadowFrame* prev_frame_;
- ShadowFrame* top_frame_;
-};
-
-} // namespace art
-
-extern "C" {
-using ::art::CatchHandlerIterator;
-using ::art::DexFile;
-using ::art::FindFieldFast;
-using ::art::FindMethodFast;
-using ::art::InstanceObjectRead;
-using ::art::InstanceObjectWrite;
-using ::art::InstancePrimitiveRead;
-using ::art::InstancePrimitiveWrite;
-using ::art::Instruction;
-using ::art::InvokeType;
-using ::art::JNIEnvExt;
-using ::art::JValue;
-using ::art::Locks;
-using ::art::MethodHelper;
-using ::art::PrettyClass;
-using ::art::PrettyMethod;
-using ::art::Primitive;
-using ::art::ResolveStringFromCode;
-using ::art::Runtime;
-using ::art::ScopedJniEnvLocalRefState;
-using ::art::ScopedObjectAccessUnchecked;
-using ::art::ShadowFrame;
-using ::art::ShadowFrameCopyVisitor;
-using ::art::StaticObjectRead;
-using ::art::StaticObjectWrite;
-using ::art::StaticPrimitiveRead;
-using ::art::StaticPrimitiveWrite;
-using ::art::Thread;
-using ::art::Thread;
-using ::art::ThrowArithmeticExceptionDivideByZero;
-using ::art::ThrowArrayIndexOutOfBoundsException;
-using ::art::ThrowArrayStoreException;
-using ::art::ThrowClassCastException;
-using ::art::ThrowLocation;
-using ::art::ThrowNoSuchMethodError;
-using ::art::ThrowNullPointerException;
-using ::art::ThrowNullPointerExceptionFromDexPC;
-using ::art::ThrowStackOverflowError;
-using ::art::kDirect;
-using ::art::kInterface;
-using ::art::kNative;
-using ::art::kStatic;
-using ::art::kSuper;
-using ::art::kVirtual;
-using ::art::mirror::AbstractMethod;
-using ::art::mirror::Array;
-using ::art::mirror::Class;
-using ::art::mirror::Field;
-using ::art::mirror::Object;
-using ::art::mirror::Throwable;
-
-//----------------------------------------------------------------------------
-// Thread
-//----------------------------------------------------------------------------
-
-Thread* art_portable_get_current_thread_from_code() {
-#if defined(__arm__) || defined(__i386__)
- LOG(FATAL) << "UNREACHABLE";
-#endif
- return Thread::Current();
-}
-
-void* art_portable_set_current_thread_from_code(void* thread_object_addr) {
- // Hijacked to set r9 on ARM.
- LOG(FATAL) << "UNREACHABLE";
- return NULL;
-}
-
-void art_portable_lock_object_from_code(Object* obj, Thread* thread)
- EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
- DCHECK(obj != NULL); // Assumed to have been checked before entry
- obj->MonitorEnter(thread); // May block
- DCHECK(thread->HoldsLock(obj));
- // Only possible exception is NPE and is handled before entry
- DCHECK(!thread->IsExceptionPending());
-}
-
-void art_portable_unlock_object_from_code(Object* obj, Thread* thread)
- UNLOCK_FUNCTION(monitor_lock_) {
- DCHECK(obj != NULL); // Assumed to have been checked before entry
- // MonitorExit may throw exception
- obj->MonitorExit(thread);
-}
-
-void art_portable_test_suspend_from_code(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckSuspend(self);
- if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
- // Save out the shadow frame to the heap
- ShadowFrameCopyVisitor visitor(self);
- visitor.WalkStack(true);
- self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
- self->SetDeoptimizationReturnValue(JValue());
- self->SetException(ThrowLocation(), reinterpret_cast<Throwable*>(-1));
- }
-}
-
-ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
- ShadowFrame* new_shadow_frame,
- AbstractMethod* method,
- uint32_t num_vregs) {
- ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
- new_shadow_frame->SetMethod(method);
- new_shadow_frame->SetNumberOfVRegs(num_vregs);
- return old_frame;
-}
-
-void art_portable_pop_shadow_frame_from_code(void*) {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-
-void art_portable_mark_gc_card_from_code(void *, void*) {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-
-//----------------------------------------------------------------------------
-// Exception
-//----------------------------------------------------------------------------
-
-bool art_portable_is_exception_pending_from_code() {
- LOG(FATAL) << "Implemented by IRBuilder.";
- return false;
-}
-
-void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArithmeticExceptionDivideByZero();
-}
-
-void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArrayIndexOutOfBoundsException(index, length);
-}
-
-void art_portable_throw_no_such_method_from_code(int32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowNoSuchMethodError(method_idx);
-}
-
-void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: remove dex_pc argument from caller.
- UNUSED(dex_pc);
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionFromDexPC(throw_location);
-}
-
-void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowStackOverflowError(Thread::Current());
-}
-
-void art_portable_throw_exception_from_code(Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- if (exception == NULL) {
- ThrowNullPointerException(NULL, "throw with null exception");
- } else {
- self->SetException(throw_location, exception);
- }
-}
-
-void* art_portable_get_and_clear_exception(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(self->IsExceptionPending());
- // TODO: make this inline.
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
- return exception;
-}
-
-int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method,
- uint32_t ti_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current(); // TODO: make an argument.
- ThrowLocation throw_location;
- Throwable* exception = self->GetException(&throw_location);
- // Check for special deoptimization exception.
- if (UNLIKELY(reinterpret_cast<int32_t>(exception) == -1)) {
- return -1;
- }
- Class* exception_type = exception->GetClass();
- MethodHelper mh(current_method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- DCHECK_LT(ti_offset, code_item->tries_size_);
- const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset);
-
- int iter_index = 0;
- int result = -1;
- uint32_t catch_dex_pc = -1;
- // Iterate over the catch handlers associated with dex_pc
- for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) {
- uint16_t iter_type_idx = it.GetHandlerTypeIndex();
- // Catch all case
- if (iter_type_idx == DexFile::kDexNoIndex16) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- // Does this catch exception type apply?
- Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
- if (UNLIKELY(iter_exception_type == NULL)) {
- // TODO: check, the verifier (class linker?) should take care of resolving all exception
- // classes early.
- LOG(WARNING) << "Unresolved exception class when finding catch block: "
- << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- ++iter_index;
- }
- if (result != -1) {
- // Handler found.
- Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self,
- throw_location,
- current_method,
- catch_dex_pc,
- exception);
- // If the catch block has no move-exception then clear the exception for it.
- const Instruction* first_catch_instr =
- Instruction::At(&mh.GetCodeItem()->insns_[catch_dex_pc]);
- if (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION) {
- self->ClearException();
- }
- }
- return result;
-}
-
-
-//----------------------------------------------------------------------------
-// Object Space
-//----------------------------------------------------------------------------
-
-Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode(type_idx, referrer, thread, false);
-}
-
-Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode(type_idx, referrer, thread, true);
-}
-
-Object* art_portable_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode(type_idx, referrer, length, self, false);
-}
-
-Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode(type_idx, referrer, length, self, true);
-}
-
-Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
-}
-
-Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
-}
-
-static AbstractMethod* FindMethodHelper(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* caller_method,
- bool access_check,
- InvokeType type,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* method = FindMethodFast(method_idx,
- this_object,
- caller_method,
- access_check,
- type);
- if (UNLIKELY(method == NULL)) {
- method = FindMethodFromCode(method_idx, this_object, caller_method,
- thread, access_check, type);
- if (UNLIKELY(method == NULL)) {
- CHECK(thread->IsExceptionPending());
- return 0; // failure
- }
- }
- DCHECK(!thread->IsExceptionPending());
- const void* code = method->GetEntryPointFromCompiledCode();
-
- // When we return, the caller will branch to this address, so it had better not be 0!
- if (UNLIKELY(code == NULL)) {
- MethodHelper mh(method);
- LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
- << " location: " << mh.GetDexFile().GetLocation();
- }
- return method;
-}
-
-Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
-}
-
-Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
-}
-
-Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
-}
-
-Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
-}
-
-Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
-}
-
-Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
-}
-
-Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
-}
-
-Object* art_portable_initialize_type_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
-}
-
-Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Called when caller isn't guaranteed to have access to a type and the dex cache may be
- // unpopulated
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
-}
-
-Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveStringFromCode(referrer, string_idx);
-}
-
-int32_t art_portable_set32_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx,
- referrer,
- StaticPrimitiveWrite,
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- field->Set32(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx,
- referrer,
- Thread::Current(),
- StaticPrimitiveWrite,
- sizeof(uint32_t),
- true);
- if (LIKELY(field != NULL)) {
- field->Set32(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set64_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- field->Set64(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx,
- referrer,
- Thread::Current(),
- StaticPrimitiveWrite,
- sizeof(uint64_t),
- true);
- if (LIKELY(field != NULL)) {
- field->Set64(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- field->SetObj(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectWrite, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- field->SetObj(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticPrimitiveRead, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- return 0;
-}
-
-int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticPrimitiveRead, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- return 0;
-}
-
-Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectRead, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- return 0;
-}
-
-int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, uint32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- field->Set32(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveWrite, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- field->Set32(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- field->Set64(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveWrite, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- field->Set64(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- field->SetObj(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectWrite, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- field->SetObj(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveRead, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- return 0;
-}
-
-int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveRead, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- return 0;
-}
-
-Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectRead, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- return 0;
-}
-
-void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc,
- Array* array, uint32_t payload_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Test: Is array equal to null? (Guard NullPointerException)
- if (UNLIKELY(array == NULL)) {
- art_portable_throw_null_pointer_exception_from_code(dex_pc);
- return;
- }
-
- // Find the payload from the CodeItem
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
-
- DCHECK_GT(code_item->insns_size_in_code_units_, payload_offset);
-
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(
- code_item->insns_ + payload_offset);
-
- DCHECK_EQ(payload->ident,
- static_cast<uint16_t>(Instruction::kArrayDataSignature));
-
- // Test: Is array big enough?
- uint32_t array_len = static_cast<uint32_t>(array->GetLength());
- if (UNLIKELY(array_len < payload->element_count)) {
- int32_t last_index = payload->element_count - 1;
- art_portable_throw_array_bounds_from_code(array_len, last_index);
- return;
- }
-
- // Copy the data
- size_t size = payload->element_width * payload->element_count;
- memcpy(array->GetRawData(payload->element_width), payload->data, size);
-}
-
-
-
-//----------------------------------------------------------------------------
-// Type checking, in the nature of casting
-//----------------------------------------------------------------------------
-
-int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type != NULL);
- DCHECK(src_type != NULL);
- return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
-}
-
-void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
- DCHECK(src_type->IsClass()) << PrettyClass(src_type);
- if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) {
- ThrowClassCastException(dest_type, src_type);
- }
-}
-
-void art_portable_check_put_array_element_from_code(const Object* element,
- const Object* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (element == NULL) {
- return;
- }
- DCHECK(array != NULL);
- Class* array_class = array->GetClass();
- DCHECK(array_class != NULL);
- Class* component_type = array_class->GetComponentType();
- Class* element_class = element->GetClass();
- if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
- ThrowArrayStoreException(element_class, array_class);
- }
- return;
-}
-
-//----------------------------------------------------------------------------
-// JNI
-//----------------------------------------------------------------------------
-
-// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
-uint32_t art_portable_jni_method_start(Thread* self)
- UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) {
- JNIEnvExt* env = self->GetJniEnv();
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
- env->local_ref_cookie = env->locals.GetSegmentState();
- self->TransitionFromRunnableToSuspended(kNative);
- return saved_local_ref_cookie;
-}
-
-uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) {
- self->DecodeJObject(to_lock)->MonitorEnter(self);
- return art_portable_jni_method_start(self);
-}
-
-static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
- JNIEnvExt* env = self->GetJniEnv();
- env->locals.SetSegmentState(env->local_ref_cookie);
- env->local_ref_cookie = saved_local_ref_cookie;
-}
-
-void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-
-void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-Object* art_portable_jni_method_end_with_reference(jobject result,
- uint32_t saved_local_ref_cookie,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- Object* o = self->DecodeJObject(result); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
- uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- Object* o = self->DecodeJObject(result);
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation
-// handler which is a field within the proxy object receiver. The var args encode the arguments
-// with the last argument being a pointer to a JValue to store the result in.
-void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- va_list ap;
- va_start(ap, proxy_method);
-
- Object* receiver = va_arg(ap, Object*);
- Thread* self = va_arg(ap, Thread*);
- MethodHelper proxy_mh(proxy_method);
-
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- self->VerifyStack();
-
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
-
- // Create local ref. copies of the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Convert proxy method into expected interface method.
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // Record arguments and turn Object* arguments into jobject to survive GC.
- std::vector<jvalue> args;
- const size_t num_params = proxy_mh.NumArgs();
- for (size_t i = 1; i < num_params; ++i) {
- jvalue val;
- switch (proxy_mh.GetParamPrimitiveType(i)) {
- case Primitive::kPrimNot:
- val.l = soa.AddLocalReference<jobject>(va_arg(ap, Object*));
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- val.i = va_arg(ap, jint);
- break;
- case Primitive::kPrimFloat:
- // TODO: should this be jdouble? Floats aren't passed to var arg routines.
- val.i = va_arg(ap, jint);
- break;
- case Primitive::kPrimDouble:
- val.d = (va_arg(ap, jdouble));
- break;
- case Primitive::kPrimLong:
- val.j = (va_arg(ap, jlong));
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args.push_back(val);
- }
- self->EndAssertNoThreadSuspension(old_cause);
- JValue* result_location = NULL;
- const char* shorty = proxy_mh.GetShorty();
- if (shorty[0] != 'V') {
- result_location = va_arg(ap, JValue*);
- }
- va_end(ap);
- JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
- if (result_location != NULL) {
- *result_location = result;
- }
-}
-
-//----------------------------------------------------------------------------
-// Memory barrier
-//----------------------------------------------------------------------------
-
-void art_portable_constructor_barrier() {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-} // extern "C"
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d5fdd20..97a1410 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -38,6 +38,7 @@
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -54,7 +55,6 @@
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
@@ -86,16 +86,23 @@ static void UnimplementedEntryPoint() {
}
#endif
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints);
+
void Thread::InitFunctionPointers() {
#if !defined(__APPLE__) // The Mac GCC is too old to accept this code.
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
- uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
- uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
+ uintptr_t* begin = reinterpret_cast<uintptr_t*>(&quick_entrypoints_);
+ uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
+ for (uintptr_t* it = begin; it != end; ++it) {
+ *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
+ }
+ begin = reinterpret_cast<uintptr_t*>(&portable_entrypoints_);
+ end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
for (uintptr_t* it = begin; it != end; ++it) {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
#endif
- InitEntryPoints(&entrypoints_);
+ InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_);
}
void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
@@ -1582,86 +1589,87 @@ struct EntryPointInfo {
uint32_t offset;
const char* name;
};
-#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
+#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x }
+#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x }
static const EntryPointInfo gThreadEntryPointInfo[] = {
- ENTRY_POINT_INFO(pAllocArrayFromCode),
- ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pAllocObjectFromCode),
- ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
- ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
- ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
- ENTRY_POINT_INFO(pCheckCastFromCode),
- ENTRY_POINT_INFO(pInitializeStaticStorage),
- ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
- ENTRY_POINT_INFO(pInitializeTypeFromCode),
- ENTRY_POINT_INFO(pResolveStringFromCode),
- ENTRY_POINT_INFO(pSet32Instance),
- ENTRY_POINT_INFO(pSet32Static),
- ENTRY_POINT_INFO(pSet64Instance),
- ENTRY_POINT_INFO(pSet64Static),
- ENTRY_POINT_INFO(pSetObjInstance),
- ENTRY_POINT_INFO(pSetObjStatic),
- ENTRY_POINT_INFO(pGet32Instance),
- ENTRY_POINT_INFO(pGet32Static),
- ENTRY_POINT_INFO(pGet64Instance),
- ENTRY_POINT_INFO(pGet64Static),
- ENTRY_POINT_INFO(pGetObjInstance),
- ENTRY_POINT_INFO(pGetObjStatic),
- ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
- ENTRY_POINT_INFO(pJniMethodStart),
- ENTRY_POINT_INFO(pJniMethodStartSynchronized),
- ENTRY_POINT_INFO(pJniMethodEnd),
- ENTRY_POINT_INFO(pJniMethodEndSynchronized),
- ENTRY_POINT_INFO(pJniMethodEndWithReference),
- ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
- ENTRY_POINT_INFO(pLockObjectFromCode),
- ENTRY_POINT_INFO(pUnlockObjectFromCode),
- ENTRY_POINT_INFO(pCmpgDouble),
- ENTRY_POINT_INFO(pCmpgFloat),
- ENTRY_POINT_INFO(pCmplDouble),
- ENTRY_POINT_INFO(pCmplFloat),
- ENTRY_POINT_INFO(pFmod),
- ENTRY_POINT_INFO(pSqrt),
- ENTRY_POINT_INFO(pL2d),
- ENTRY_POINT_INFO(pFmodf),
- ENTRY_POINT_INFO(pL2f),
- ENTRY_POINT_INFO(pD2iz),
- ENTRY_POINT_INFO(pF2iz),
- ENTRY_POINT_INFO(pIdivmod),
- ENTRY_POINT_INFO(pD2l),
- ENTRY_POINT_INFO(pF2l),
- ENTRY_POINT_INFO(pLdiv),
- ENTRY_POINT_INFO(pLdivmod),
- ENTRY_POINT_INFO(pLmul),
- ENTRY_POINT_INFO(pShlLong),
- ENTRY_POINT_INFO(pShrLong),
- ENTRY_POINT_INFO(pUshrLong),
- ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
- ENTRY_POINT_INFO(pInterpreterToQuickEntry),
- ENTRY_POINT_INFO(pIndexOf),
- ENTRY_POINT_INFO(pMemcmp16),
- ENTRY_POINT_INFO(pStringCompareTo),
- ENTRY_POINT_INFO(pMemcpy),
- ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
- ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
- ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
- ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pCheckSuspendFromCode),
- ENTRY_POINT_INFO(pTestSuspendFromCode),
- ENTRY_POINT_INFO(pDeliverException),
- ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
- ENTRY_POINT_INFO(pThrowDivZeroFromCode),
- ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
- ENTRY_POINT_INFO(pThrowNullPointerFromCode),
- ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
+ QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
+ QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
+ QUICK_ENTRY_POINT_INFO(pCheckCastFromCode),
+ QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
+ QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
+ QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode),
+ QUICK_ENTRY_POINT_INFO(pResolveStringFromCode),
+ QUICK_ENTRY_POINT_INFO(pSet32Instance),
+ QUICK_ENTRY_POINT_INFO(pSet32Static),
+ QUICK_ENTRY_POINT_INFO(pSet64Instance),
+ QUICK_ENTRY_POINT_INFO(pSet64Static),
+ QUICK_ENTRY_POINT_INFO(pSetObjInstance),
+ QUICK_ENTRY_POINT_INFO(pSetObjStatic),
+ QUICK_ENTRY_POINT_INFO(pGet32Instance),
+ QUICK_ENTRY_POINT_INFO(pGet32Static),
+ QUICK_ENTRY_POINT_INFO(pGet64Instance),
+ QUICK_ENTRY_POINT_INFO(pGet64Static),
+ QUICK_ENTRY_POINT_INFO(pGetObjInstance),
+ QUICK_ENTRY_POINT_INFO(pGetObjStatic),
+ QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
+ QUICK_ENTRY_POINT_INFO(pJniMethodStart),
+ QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
+ QUICK_ENTRY_POINT_INFO(pLockObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pCmpgDouble),
+ QUICK_ENTRY_POINT_INFO(pCmpgFloat),
+ QUICK_ENTRY_POINT_INFO(pCmplDouble),
+ QUICK_ENTRY_POINT_INFO(pCmplFloat),
+ QUICK_ENTRY_POINT_INFO(pFmod),
+ QUICK_ENTRY_POINT_INFO(pSqrt),
+ QUICK_ENTRY_POINT_INFO(pL2d),
+ QUICK_ENTRY_POINT_INFO(pFmodf),
+ QUICK_ENTRY_POINT_INFO(pL2f),
+ QUICK_ENTRY_POINT_INFO(pD2iz),
+ QUICK_ENTRY_POINT_INFO(pF2iz),
+ QUICK_ENTRY_POINT_INFO(pIdivmod),
+ QUICK_ENTRY_POINT_INFO(pD2l),
+ QUICK_ENTRY_POINT_INFO(pF2l),
+ QUICK_ENTRY_POINT_INFO(pLdiv),
+ QUICK_ENTRY_POINT_INFO(pLdivmod),
+ QUICK_ENTRY_POINT_INFO(pLmul),
+ QUICK_ENTRY_POINT_INFO(pShlLong),
+ QUICK_ENTRY_POINT_INFO(pShrLong),
+ QUICK_ENTRY_POINT_INFO(pUshrLong),
+ QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
+ QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry),
+ QUICK_ENTRY_POINT_INFO(pIndexOf),
+ QUICK_ENTRY_POINT_INFO(pMemcmp16),
+ QUICK_ENTRY_POINT_INFO(pStringCompareTo),
+ QUICK_ENTRY_POINT_INFO(pMemcpy),
+ QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
+ QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
+ QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode),
+ QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode),
+ QUICK_ENTRY_POINT_INFO(pDeliverException),
+ QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
+ PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
};
-#undef ENTRY_POINT_INFO
+#undef QUICK_ENTRY_POINT_INFO
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
@@ -1686,8 +1694,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_
#undef DO_THREAD_OFFSET
size_t entry_point_count = arraysize(gThreadEntryPointInfo);
- CHECK_EQ(entry_point_count * size_of_pointers, sizeof(QuickEntryPoints));
- uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
+ CHECK_EQ(entry_point_count * size_of_pointers,
+ sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints));
+ uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_);
for (size_t i = 0; i < entry_point_count; ++i) {
CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
expected_offset += size_of_pointers;
diff --git a/runtime/thread.h b/runtime/thread.h
index d02ab36..ff0fe22 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -26,9 +26,10 @@
#include <string>
#include "base/macros.h"
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "jvalue.h"
-#include "entrypoints/quick/quick_entrypoints.h"
#include "locks.h"
#include "offsets.h"
#include "root_visitor.h"
@@ -773,9 +774,10 @@ class PACKED(4) Thread {
Closure* checkpoint_function_;
public:
- // Runtime support function pointers
+ // Entrypoint function pointers
// TODO: move this near the top, since changing its offset requires all oats to be recompiled!
- QuickEntryPoints entrypoints_;
+ QuickEntryPoints quick_entrypoints_;
+ PortableEntryPoints portable_entrypoints_;
private:
// How many times has our pthread key's destructor been called?