authorJeff Hao <jeffhao@google.com>2014-07-16 23:28:05 +0000
committerGerrit Code Review <noreply-gerritcodereview@google.com>2014-07-16 16:25:15 +0000
commit9791bb427fd812c1268edab6fb3ac7b82ad9fb93 (patch)
tree9467ee834502643a3b8b2261f0242a5f58ad61f9
parentebe8f799c5053f85adffee23cb46851efebbfd92 (diff)
parentd9cb8ae2ed78f957a773af61759432d7a7bf78af (diff)
Merge "Fix art test failures for Mips."
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h            2
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc            4
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h        2
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc        4
-rw-r--r--  compiler/dex/quick/gen_common.cc               14
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h          4
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc         42
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc        21
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc                6
-rw-r--r--  compiler/dex/quick/mir_to_lir.h                 2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h            2
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc            4
-rw-r--r--  compiler/oat_test.cc                            2
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc     13
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h   4
-rw-r--r--  runtime/entrypoints_order_test.cc               4
-rw-r--r--  runtime/thread.cc                               2
17 files changed, 75 insertions, 57 deletions
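
This merge drops the per-backend SupportsVolatileLoadStore() hook: volatile 64-bit field accesses on MIPS are now compiled to calls into two new quick entrypoints, pA64Load and pA64Store, which the runtime wires to QuasiAtomic::Read64/Write64. As a rough idea of the contract those helpers have to provide on a 32-bit target without native 64-bit atomics, a minimal sketch follows; the lock-based body is an assumption for illustration, not the actual QuasiAtomic implementation.

#include <cstdint>
#include <mutex>

// Sketch of the contract behind pA64Load/pA64Store: one 64-bit value is read
// or written atomically. A mutex-guarded fallback is one way a target without
// 64-bit atomic instructions could satisfy it (illustrative only).
static std::mutex g_atomic64_lock;  // hypothetical lock for this sketch

std::int64_t A64Load(volatile const std::int64_t* addr) {
  std::lock_guard<std::mutex> lock(g_atomic64_lock);
  return *addr;
}

void A64Store(volatile std::int64_t* addr, std::int64_t value) {
  std::lock_guard<std::mutex> lock(g_atomic64_lock);
  *addr = value;
}
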
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index d4b0de7..582af51 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -85,8 +85,6 @@ class ArmMir2Lir FINAL : public Mir2Lir {
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index ef94bbc..8cc7596 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -538,10 +538,6 @@ bool ArmMir2Lir::IsUnconditionalBranch(LIR* lir) {
return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
}
-bool ArmMir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass ArmMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
// On arm, atomic 64-bit load/store requires a core register pair.
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 246b682..f51145c 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -146,8 +146,6 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 19a3cf1..f1dc77a 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -569,10 +569,6 @@ bool Arm64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
return (lir->opcode == kA64B1t);
}
-bool Arm64Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
// On arm64, fp register load/store is atomic only for single bytes.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index cf80ee7..502859a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -582,8 +582,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastPut() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastPut()) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
RegStorage r_base;
if (field_info.IsReferrersClass()) {
@@ -683,8 +682,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastGet() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastGet()) {
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
RegStorage r_base;
if (field_info.IsReferrersClass()) {
@@ -788,6 +786,8 @@ static void GenIgetCall(Mir2Lir* mir_to_lir, bool is_long_or_double, bool is_obj
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pointer_size, pGetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pointer_size, pGet32Instance));
+ // Second argument of pGetXXInstance is always a reference.
+ DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
mir_to_lir->CallRuntimeHelperImmRegLocation(getter_offset, field_info->FieldIndex(), rl_obj,
true);
}
@@ -798,8 +798,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastGet() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(load_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kRefReg);
@@ -855,8 +854,7 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
- if (!SLOW_FIELD_PATH && field_info.FastPut() &&
- (!field_info.IsVolatile() || SupportsVolatileLoadStore(store_size))) {
+ if (!SLOW_FIELD_PATH && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kRefReg);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2c33377..4a06086 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -47,6 +47,8 @@ class MipsMir2Lir FINAL : public Mir2Lir {
OpSize size) OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
@@ -83,8 +85,6 @@ class MipsMir2Lir FINAL : public Mir2Lir {
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index a5b7824..4ba94c4 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -496,6 +496,39 @@ LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
return inst;
}
+LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(r_dest.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Load));
+ LIR *ret = OpReg(kOpBlx, r_tgt);
+ RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+ OpRegCopyWide(r_dest, reg_ret);
+ return ret;
+}
+
+LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+ DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(r_src.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage temp_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ RegStorage temp_value = AllocTempWide();
+ OpRegCopyWide(temp_value, r_src);
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegCopy(reg_ptr, temp_ptr);
+ RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ OpRegCopyWide(reg_value, temp_value);
+ FreeTemp(temp_ptr);
+ FreeTemp(temp_value);
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Store));
+ return OpReg(kOpBlx, r_tgt);
+}
+
void MipsMir2Lir::SpillCoreRegs() {
if (num_core_spills_ == 0) {
return;
@@ -530,17 +563,12 @@ bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
return (lir->opcode == kMipsB);
}
-bool MipsMir2Lir::SupportsVolatileLoadStore(OpSize size) {
- // No support for 64-bit atomic load/store on mips.
- return size != k64 && size != kDouble;
-}
-
RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
if (UNLIKELY(is_volatile)) {
- // On Mips, atomic 64-bit load/store requires an fp register.
+ // On Mips, atomic 64-bit load/store requires a core register.
// Smaller aligned load/store is atomic for both core and fp registers.
if (size == k64 || size == kDouble) {
- return kFPReg;
+ return kCoreReg;
}
}
// TODO: Verify that both core and fp registers are suitable for smaller sizes.
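
GenAtomic64Load()/GenAtomic64Store() above marshal the call the same way other MIPS runtime helpers do: the field address goes in kArg0, a 64-bit store value in the kArg2/kArg3 pair, and a loaded value comes back in kRet0/kRet1. In C++ terms the generated code reduces to roughly the sketch below; the wrapper names and the char* base pointer are illustrative, while pA64Load/pA64Store are the entrypoint slots added later in this change.

#include <cstdint>

// Entrypoint slots as declared in quick_entrypoints.h (see that hunk below).
extern std::int64_t (*pA64Load)(volatile const std::int64_t*);
extern void (*pA64Store)(volatile std::int64_t*, std::int64_t);

// kArg0 <- base + displacement; the 64-bit result arrives in kRet0/kRet1.
std::int64_t LoadVolatile64(char* base, int displacement) {
  return pA64Load(reinterpret_cast<volatile const std::int64_t*>(base + displacement));
}

// kArg0 <- address, kArg2/kArg3 <- value.
void StoreVolatile64(char* base, int displacement, std::int64_t value) {
  pA64Store(reinterpret_cast<volatile std::int64_t*>(base + displacement), value);
}
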
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 75d3c5d..0e8188b 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,8 +551,9 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ // Do atomic 64-bit load.
+ return GenAtomic64Load(r_base, displacement, r_dest);
}
// TODO: base this on target.
@@ -654,17 +655,21 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size, VolatileKind is_volatile) {
if (is_volatile == kVolatile) {
- DCHECK(size != k64 && size != kDouble);
// Ensure that prior accesses become visible to other threads first.
GenMemBarrier(kAnyStore);
}
- // TODO: base this on target.
- if (size == kWord) {
- size = k32;
- }
LIR* store;
- store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ // Do atomic 64-bit store.
+ store = GenAtomic64Store(r_base, displacement, r_src);
+ } else {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
+ store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ }
if (UNLIKELY(is_volatile == kVolatile)) {
// Preserve order with respect to any subsequent volatile loads.
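
The volatile path in StoreBaseDisp() keeps the existing barrier discipline around the new helper call: a kAnyStore barrier makes prior accesses visible before the store, and, as the trailing comment in the hunk indicates, another barrier after it preserves ordering against subsequent volatile loads. In std::atomic terms the sequence is roughly the sketch below; the fence strengths are an approximation of the backend's barrier kinds, not taken from this diff.

#include <atomic>
#include <cstdint>

// Approximate fence placement around an atomic 64-bit volatile store.
void VolatileStore64(volatile std::int64_t* addr, std::int64_t value,
                     void (*atomic64_store)(volatile std::int64_t*, std::int64_t)) {
  std::atomic_thread_fence(std::memory_order_release);  // prior accesses visible first (kAnyStore)
  atomic64_store(addr, value);                          // pA64Store on MIPS32
  std::atomic_thread_fence(std::memory_order_seq_cst);  // order against later volatile loads
}
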
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index edb3b23..ed7fcdd 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -226,9 +226,6 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
OpSize size = LoadStoreOpSize(wide, ref);
- if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
- return false;
- }
// Point of no return - no aborts after this
GenPrintLabel(mir);
@@ -273,9 +270,6 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
OpSize size = LoadStoreOpSize(wide, ref);
- if (data.is_volatile && !SupportsVolatileLoadStore(size)) {
- return false;
- }
// Point of no return - no aborts after this
GenPrintLabel(mir);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4e4f110..5b56633 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1274,8 +1274,6 @@ class Mir2Lir : public Backend {
virtual size_t GetInsnSize(LIR* lir) = 0;
virtual bool IsUnconditionalBranch(LIR* lir) = 0;
- // Check support for volatile load/store of a given size.
- virtual bool SupportsVolatileLoadStore(OpSize size) = 0;
// Get the register class for load/store of a field.
virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 1f97429..cf4521a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -145,8 +145,6 @@ class X86Mir2Lir : public Mir2Lir {
size_t GetInsnSize(LIR* lir) OVERRIDE;
bool IsUnconditionalBranch(LIR* lir);
- // Check support for volatile load/store of a given size.
- bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
// Get the register class for load/store of a field.
RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 61a0474..06001d7 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -755,10 +755,6 @@ bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}
-bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
- return true;
-}
-
RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
// X86_64 can handle any size.
if (cu_->target64) {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 458d5b6..84f0b3c 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -186,7 +186,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(80U, sizeof(OatHeader));
EXPECT_EQ(8U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(77 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 7a2e961..d3e7d5e 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,6 +21,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
+#include "atomic.h"
namespace art {
@@ -196,11 +197,11 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pCmplDouble = CmplDouble;
qpoints->pCmplFloat = CmplFloat;
qpoints->pFmod = fmod;
- qpoints->pL2d = __floatdidf;
+ qpoints->pL2d = art_l2d;
qpoints->pFmodf = fmodf;
- qpoints->pL2f = __floatdisf;
- qpoints->pD2iz = __fixdfsi;
- qpoints->pF2iz = __fixsfsi;
+ qpoints->pL2f = art_l2f;
+ qpoints->pD2iz = art_d2i;
+ qpoints->pF2iz = art_f2i;
qpoints->pIdivmod = NULL;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
@@ -236,6 +237,10 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+
+ // Atomic 64-bit load/store
+ qpoints->pA64Load = QuasiAtomic::Read64;
+ qpoints->pA64Store = QuasiAtomic::Write64;
};
} // namespace art
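
Besides the new atomic helpers, the MIPS entrypoint table switches the conversion slots from the libgcc builtins to the runtime's art_* helpers. For pD2iz/pF2iz this matters semantically: Java defines NaN as converting to 0 and out-of-range values as saturating at the int bounds, while the C-style __fixdfsi/__fixsfsi conversions leave out-of-range input undefined. A sketch of a conversion with Java semantics follows; it is illustrative, not the actual art_d2i body.

#include <cmath>
#include <cstdint>
#include <limits>

// double -> int with Java semantics: NaN -> 0, saturation at the int32_t
// bounds, otherwise truncation toward zero.
std::int32_t JavaD2I(double d) {
  if (std::isnan(d)) {
    return 0;
  }
  if (d >= static_cast<double>(std::numeric_limits<std::int32_t>::max())) {
    return std::numeric_limits<std::int32_t>::max();
  }
  if (d <= static_cast<double>(std::numeric_limits<std::int32_t>::min())) {
    return std::numeric_limits<std::int32_t>::min();
  }
  return static_cast<std::int32_t>(d);
}
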
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 032f6be..473687c 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -138,6 +138,10 @@ struct PACKED(4) QuickEntryPoints {
void (*pThrowNoSuchMethod)(int32_t);
void (*pThrowNullPointer)();
void (*pThrowStackOverflow)(void*);
+
+ // Atomic 64-bit load/store
+ int64_t (*pA64Load)(volatile const int64_t *);
+ void (*pA64Store)(volatile int64_t *, int64_t);
};
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 79c68a2..ae1b94f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -259,8 +259,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pThrowStackOverflow)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
+ kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
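
The order test pins each new slot exactly one pointer past its predecessor and re-anchors the end-of-struct check on pA64Store. The same invariant, written directly against the struct in a sketch using static_assert rather than the test's EXPECT_OFFSET_DIFFNP/CHECKED macros; sizeof(void*) stands in here for the test's kPointerSize.

#include <cstddef>

#include "entrypoints/quick/quick_entrypoints.h"

// pA64Load and pA64Store sit one pointer apart at the very end of QuickEntryPoints.
static_assert(offsetof(art::QuickEntryPoints, pA64Store) ==
                  offsetof(art::QuickEntryPoints, pA64Load) + sizeof(void*),
              "pA64Store must immediately follow pA64Load");
static_assert(offsetof(art::QuickEntryPoints, pA64Store) + sizeof(void*) ==
                  sizeof(art::QuickEntryPoints),
              "pA64Store must be the last entrypoint");
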
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6d3ba5d..f888029 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1931,6 +1931,8 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
+ QUICK_ENTRY_POINT_INFO(pA64Load)
+ QUICK_ENTRY_POINT_INFO(pA64Store)
#undef QUICK_ENTRY_POINT_INFO
os << offset;