author     Vladimir Marko <vmarko@google.com>   2014-02-26 11:24:15 +0000
committer  Vladimir Marko <vmarko@google.com>   2014-03-03 12:55:45 +0000
commit     be0e546730e532ef0987cd4bde2c6f5a1b14dd2a (patch)
tree       41aa0541ec85b8e26c5e50cc7341f506f5d52314 /compiler
parent     cc261bfd336eddac18b85d4eb47f6c905d495241 (diff)
Cache field lowering info in mir_graph.
Change-Id: I9f9d76e3ae6c31e88bdf3f59820d31a625da020f
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                        |   1
-rw-r--r--  compiler/dex/bb_optimizations.h            |  14
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc        |   8
-rw-r--r--  compiler/dex/local_value_numbering.cc      |  73
-rw-r--r--  compiler/dex/local_value_numbering_test.cc |  75
-rw-r--r--  compiler/dex/mir_analysis.cc               | 109
-rw-r--r--  compiler/dex/mir_field_info.cc             | 121
-rw-r--r--  compiler/dex/mir_field_info.h              | 211
-rw-r--r--  compiler/dex/mir_graph.cc                  |   4
-rw-r--r--  compiler/dex/mir_graph.h                   |  22
-rw-r--r--  compiler/dex/pass_driver.cc                |   1
-rw-r--r--  compiler/dex/quick/codegen_util.cc         |   5
-rw-r--r--  compiler/dex/quick/gen_common.cc           | 124
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc           |  36
-rw-r--r--  compiler/dex/quick/mir_to_lir.h            |   9
-rw-r--r--  compiler/driver/compiler_driver-inl.h      | 165
-rw-r--r--  compiler/driver/compiler_driver.cc         | 199
-rw-r--r--  compiler/driver/compiler_driver.h          |  52
-rw-r--r--  compiler/llvm/gbc_expander.cc              |  33
19 files changed, 983 insertions, 279 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index b1b6fc5..2f785ce 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -50,6 +50,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
dex/mir_dataflow.cc \
+ dex/mir_field_info.cc \
dex/mir_optimization.cc \
dex/pass_driver.cc \
dex/bb_optimizations.cc \
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1286a8e..bd7c40b 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -23,6 +23,20 @@
namespace art {
/**
+ * @class CacheFieldLoweringInfo
+ * @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
+ */
+class CacheFieldLoweringInfo : public Pass {
+ public:
+ CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+ }
+
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->DoCacheFieldLoweringInfo();
+ }
+};
+
+/**
* @class CodeLayout
* @brief Perform the code layout pass.
*/
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff8fea0..b9f9437 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -208,21 +208,21 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
return;
}
uint32_t field_idx = inst->VRegC_22c();
- int field_offset;
+ MemberOffset field_offset(0u);
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset)) {
+ if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
- << " by field offset " << field_offset
+ << " by field offset " << field_offset.Int32Value()
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
// We are modifying 4 consecutive bytes.
inst->SetOpcode(new_opcode);
// Replace field index by field offset.
- inst->SetVRegC_22c(static_cast<uint16_t>(field_offset));
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
}
}
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a3ea034..61c6767 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -16,6 +16,7 @@
#include "local_value_numbering.h"
+#include "mir_field_info.h"
#include "mir_graph.h"
namespace art {
@@ -534,16 +535,24 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
+ uint16_t type = opcode - Instruction::IGET;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, base);
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_ifield_version_[type],
+ GetMemoryVersion(base, field_id, type));
+ }
if (opcode == Instruction::IGET_WIDE) {
res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -567,10 +576,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
HandleNullCheck(mir, base);
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_ifield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_ifield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do: resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
@@ -581,14 +598,22 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
+ uint16_t type = opcode - Instruction::SGET;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
uint16_t memory_version;
uint16_t field_id;
- // TODO: all gets treated as volatile.
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields are always marked as volatile and handled the same way here.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ field_id = 0u;
+ memory_version = next_memory_version_;
+ ++next_memory_version_;
+ } else {
+ DCHECK(field_info.IsResolved());
+ field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
+ memory_version = std::max(unresolved_sfield_version_[type],
+ GetMemoryVersion(NO_VALUE, field_id, type));
+ }
if (opcode == Instruction::SGET_WIDE) {
res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
@@ -609,10 +634,18 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
uint16_t type = opcode - Instruction::SPUT;
- // TODO: all puts treated as unresolved.
- // Unresolved fields always alias with everything of the same type.
- unresolved_sfield_version_[type] = next_memory_version_;
- ++next_memory_version_;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ unresolved_sfield_version_[type] = next_memory_version_;
+ ++next_memory_version_;
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do: resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
+ field_info.DeclaringFieldIndex()), type);
+ }
}
break;
}
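The hunks above replace the old pessimistic treatment (every field access took a fresh memory version) with per-field versioning for resolved non-volatile fields. As a reading aid only, here is a small self-contained C++ sketch of that rule; ToyFieldVersions and its members are illustrative names and are not part of this change:

#include <algorithm>
#include <cstdint>
#include <map>
#include <tuple>

// Mirrors the rule used by the new LVN code: unresolved or volatile accesses
// always take a fresh memory version, while resolved non-volatile accesses
// reuse the version of their (base, field, type) slot, never older than the
// last unresolved store of that type.
class ToyFieldVersions {
 public:
  uint16_t VersionForGet(bool resolved, bool is_volatile,
                         uint16_t base, uint16_t field_id, uint16_t type) {
    if (!resolved || is_volatile) {
      return next_version_++;  // Fresh version; never matches an earlier load.
    }
    auto it = slot_versions_.find(std::make_tuple(base, field_id, type));
    uint16_t slot_version = (it != slot_versions_.end()) ? it->second : 0u;
    return std::max(unresolved_version_[type], slot_version);
  }

  void RecordPut(bool resolved, bool is_volatile,
                 uint16_t base, uint16_t field_id, uint16_t type) {
    if (!resolved) {
      unresolved_version_[type] = next_version_++;  // Aliases every field of this type.
    } else if (!is_volatile) {
      slot_versions_[std::make_tuple(base, field_id, type)] = next_version_++;
    }
    // Resolved volatile puts need no bookkeeping: the matching gets always
    // take a fresh version anyway.
  }

 private:
  uint16_t next_version_ = 1u;
  std::map<std::tuple<uint16_t, uint16_t, uint16_t>, uint16_t> slot_versions_;
  std::map<uint16_t, uint16_t> unresolved_version_;
};

Two IGETs of the same resolved non-volatile field on the same base thus receive the same version and can share a value name, which is what re-enables the previously #if 0'd tests in the test file below.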
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 6ab6c51..4599612 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -44,7 +44,7 @@ class LocalValueNumberingTest : public testing::Test {
Instruction::Code opcode;
int64_t value;
- uint32_t field_annotation;
+ uint32_t field_info;
size_t num_uses;
int32_t uses[kMaxSsaUses];
size_t num_defs;
@@ -55,28 +55,41 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, value, 0u, 0, { }, 1, { reg } }
#define DEF_CONST_WIDE(opcode, reg, value) \
{ opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_IGET(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 1, { reg } }
-#define DEF_IGET_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 1, { obj }, 2, { reg, reg + 1 } }
-#define DEF_IPUT(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, obj }, 0, { } }
-#define DEF_IPUT_WIDE(opcode, reg, obj, field_annotation) \
- { opcode, 0u, field_annotation, 3, { reg, reg + 1, obj }, 0, { } }
-#define DEF_SGET(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 1, { reg } }
-#define DEF_SGET_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 0, { }, 2, { reg, reg + 1 } }
-#define DEF_SPUT(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 1, { reg }, 0, { } }
-#define DEF_SPUT_WIDE(opcode, reg, field_annotation) \
- { opcode, 0u, field_annotation, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_IGET(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(opcode, reg, obj, field_info) \
+ { opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(opcode, reg, field_info) \
+ { opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
#define DEF_INVOKE1(opcode, reg) \
{ opcode, 0u, 0u, 1, { reg }, 0, { } }
#define DEF_UNIQUE_REF(opcode, reg) \
{ opcode, 0u, 0u, 0, { }, 1, { reg } } // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+ cu_.mir_graph->ifield_lowering_infos_.Reset();
+ cu_.mir_graph->ifield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const IFieldDef* def = &defs[i];
+ MirIFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = 0u | // Without kFlagIsStatic.
+ (def->is_volatile ? MirIFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->ifield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -85,6 +98,19 @@ class LocalValueNumberingTest : public testing::Test {
}
void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.Reset();
+ cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic |
+ (def->is_volatile ? MirSFieldLoweringInfo::kFlagIsVolatile : 0u);
+ }
+ cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ }
}
template <size_t count>
@@ -102,6 +128,13 @@ class LocalValueNumberingTest : public testing::Test {
mir->dalvikInsn.opcode = def->opcode;
mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
mir->dalvikInsn.vB_wide = def->value;
+ if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.Size());
+ mir->meta.ifield_lowering_info = def->field_info;
+ } else if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ mir->meta.sfield_lowering_info = def->field_info;
+ }
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
@@ -146,7 +179,6 @@ class LocalValueNumberingTest : public testing::Test {
LocalValueNumbering lvn_;
};
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false }
@@ -169,7 +201,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
static const IFieldDef ifields[] = {
@@ -197,7 +228,6 @@ TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
EXPECT_EQ(mirs_[4].optimization_flags, 0u);
}
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -218,9 +248,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
EXPECT_EQ(mirs_[2].optimization_flags, 0u);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -241,9 +269,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
-#if 0 // TODO: re-enable when LVN is handling memory igets.
TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
@@ -267,7 +293,6 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-#endif
TEST_F(LocalValueNumberingTest, TestVolatile) {
static const IFieldDef ifields[] = {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8ef80fa..d159f49 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -14,11 +14,15 @@
* limitations under the License.
*/
+#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
+#include "UniquePtr.h"
namespace art {
@@ -1090,4 +1094,109 @@ bool MIRGraph::SkipCompilation() {
return ComputeSkipCompilation(&stats, skip_compilation);
}
+void MIRGraph::DoCacheFieldLoweringInfo() {
+ // Try to use a stack-allocated array; resort to the heap if we exceed the initial size.
+ static constexpr size_t kInitialSize = 32;
+ uint16_t stack_idxs[kInitialSize];
+ UniquePtr<uint16_t[]> allocated_idxs;
+ uint16_t* field_idxs = stack_idxs;
+ size_t size = kInitialSize;
+
+ // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
+ size_t ifield_pos = 0u;
+ size_t sfield_pos = size;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->block_type != kDalvikByteCode) {
+ continue;
+ }
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::IGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ bool need_alloc = false;
+ const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+ uint16_t field_idx;
+ // Get the field index and try to find it among the existing indexes. If found, it's usually
+ // among the last few added, so we start the search from ifield_pos/sfield_pos. Though this
+ // is a linear search, it performs much better than a map-based approach.
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idx = insn->VRegC_22c();
+ size_t i = ifield_pos;
+ while (i != 0u && field_idxs[i - 1] != field_idx) {
+ --i;
+ }
+ if (i != 0u) {
+ mir->meta.ifield_lowering_info = i - 1;
+ } else {
+ mir->meta.ifield_lowering_info = ifield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[ifield_pos++] = field_idx;
+ }
+ }
+ } else {
+ field_idx = insn->VRegB_21c();
+ size_t i = sfield_pos;
+ while (i != size && field_idxs[i] != field_idx) {
+ ++i;
+ }
+ if (i != size) {
+ mir->meta.sfield_lowering_info = size - i - 1u;
+ } else {
+ mir->meta.sfield_lowering_info = size - sfield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ }
+ if (UNLIKELY(need_alloc)) {
+ DCHECK(field_idxs == stack_idxs);
+ // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
+ uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+ allocated_idxs.reset(new uint16_t[max_refs]);
+ field_idxs = allocated_idxs.get();
+ size_t sfield_count = size - sfield_pos;
+ sfield_pos = max_refs - sfield_count;
+ size = max_refs;
+ memcpy(field_idxs, stack_idxs, ifield_pos * sizeof(field_idxs[0]));
+ memcpy(field_idxs + sfield_pos, stack_idxs + ifield_pos,
+ sfield_count * sizeof(field_idxs[0]));
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idxs[ifield_pos++] = field_idx;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ DCHECK_LE(ifield_pos, sfield_pos);
+ }
+ }
+ }
+
+ if (ifield_pos != 0u) {
+ // Resolve instance field infos.
+ DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
+ ifield_lowering_infos_.Resize(ifield_pos);
+ for (size_t pos = 0u; pos != ifield_pos; ++pos) {
+ ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ ifield_lowering_infos_.GetRawStorage(), ifield_pos);
+ }
+
+ if (sfield_pos != size) {
+ // Resolve static field infos.
+ DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
+ sfield_lowering_infos_.Resize(size - sfield_pos);
+ for (size_t pos = size; pos != sfield_pos;) {
+ --pos;
+ sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
+ }
+ MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
+ sfield_lowering_infos_.GetRawStorage(), size - sfield_pos);
+ }
+}
+
} // namespace art
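DoCacheFieldLoweringInfo above collects instance-field indexes from the front of a single buffer and static-field indexes from the back, deduplicating with a short linear scan from the active end. The following is a minimal standalone sketch of that idea, not part of this commit: TwoEndedIndexCache and kCapacity are made-up names, and the fallback to a heap-allocated array once the two ends meet is omitted.

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr size_t kCapacity = 8;  // Stands in for kInitialSize above.

struct TwoEndedIndexCache {
  uint16_t idxs[kCapacity];
  size_t ifield_pos = 0;          // One past the last instance-field slot.
  size_t sfield_pos = kCapacity;  // First occupied static-field slot.

  // Returns the lowering-info index an IGET/IPUT would store in its MIR.
  size_t AddIField(uint16_t field_idx) {
    for (size_t i = ifield_pos; i != 0; --i) {
      if (idxs[i - 1] == field_idx) return i - 1;  // Recently added fields hit quickly.
    }
    assert(ifield_pos < sfield_pos);  // The real pass re-allocates on overflow.
    idxs[ifield_pos] = field_idx;
    return ifield_pos++;
  }

  // Returns the lowering-info index an SGET/SPUT would store in its MIR.
  size_t AddSField(uint16_t field_idx) {
    for (size_t i = sfield_pos; i != kCapacity; ++i) {
      if (idxs[i] == field_idx) return kCapacity - i - 1;
    }
    assert(ifield_pos < sfield_pos);
    idxs[--sfield_pos] = field_idx;
    return kCapacity - sfield_pos - 1;
  }
};

int main() {
  TwoEndedIndexCache cache;
  // A repeated instance field maps to its existing slot; the static field gets slot 0 at the other end.
  printf("%zu %zu %zu\n", cache.AddIField(7), cache.AddSField(3), cache.AddIField(7));  // 0 0 0
  return 0;
}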
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
new file mode 100644
index 0000000..3c76130
--- /dev/null
+++ b/compiler/dex/mir_field_info.cc
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_field_info.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
+#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "scoped_thread_state_change.h"
+#include "sirt_ref.h"
+
+namespace art {
+
+void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirIFieldLoweringInfo unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to acquire
+ // the lock and the needed references once than to re-acquire them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without a class
+ // definition), we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
+
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ it->flags_ = 0u | // Without kFlagIsStatic.
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u);
+ }
+}
+
+void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+ const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(field_infos != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ MirSFieldLoweringInfo unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ // We're going to resolve fields and check access in a tight loop. It's better to acquire
+ // the lock and the needed references once than to re-acquire them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
+ compiler_driver->GetClassLoader(soa, mUnit));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ // Even if the referrer class is unresolved (i.e. we're compiling a method without a class
+ // definition), we still want to resolve fields and record all available info.
+
+ for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, true);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ continue;
+ }
+ compiler_driver->GetResolvedFieldDexFileLocation(resolved_field,
+ &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_field_idx_);
+ bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
+
+ bool is_referrers_class, is_initialized;
+ std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
+ dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ &it->storage_index_, &is_referrers_class, &is_initialized);
+ it->flags_ = kFlagIsStatic |
+ (is_volatile ? kFlagIsVolatile : 0u) |
+ (fast_path.first ? kFlagFastGet : 0u) |
+ (fast_path.second ? kFlagFastPut : 0u) |
+ (is_referrers_class ? kFlagIsReferrersClass : 0u) |
+ (is_initialized ? kFlagIsInitialized : 0u);
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
new file mode 100644
index 0000000..41cb4ce
--- /dev/null
+++ b/compiler/dex/mir_field_info.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+#define ART_COMPILER_DEX_MIR_FIELD_INFO_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+
+/*
+ * Field info is calculated from the perspective of the compilation unit that accesses
+ * the field and stored in that unit's MIRGraph. Therefore it does not need to reference the
+ * dex file or method for which it has been calculated. However, we do store the declaring
+ * field index, class index and dex file of the resolved field to help distinguish between fields.
+ */
+
+class MirFieldInfo {
+ public:
+ uint16_t FieldIndex() const {
+ return field_idx_;
+ }
+
+ bool IsStatic() const {
+ return (flags_ & kFlagIsStatic) != 0u;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringFieldIndex() const {
+ return declaring_field_idx_;
+ }
+
+ bool IsVolatile() const {
+ return (flags_ & kFlagIsVolatile) != 0u;
+ }
+
+ protected:
+ enum {
+ kBitIsStatic = 0,
+ kBitIsVolatile,
+ kFieldInfoBitEnd
+ };
+ static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
+ static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+
+ MirFieldInfo(uint16_t field_idx, uint16_t flags)
+ : field_idx_(field_idx),
+ flags_(flags),
+ declaring_field_idx_(0u),
+ declaring_class_idx_(0u),
+ declaring_dex_file_(nullptr) {
+ }
+
+ // Make copy-ctor/assign/dtor protected to avoid slicing.
+ MirFieldInfo(const MirFieldInfo& other) = default;
+ MirFieldInfo& operator=(const MirFieldInfo& other) = default;
+ ~MirFieldInfo() = default;
+
+ // The field index in the compiling method's dex file.
+ uint16_t field_idx_;
+ // Flags, for volatility and derived class data.
+ uint16_t flags_;
+ // The field index in the dex file that defines field, 0 if unresolved.
+ uint16_t declaring_field_idx_;
+ // The type index of the class declaring the field, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+};
+
+class MirIFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested instance field, retrieve the field's declaring location (dex file, class
+ // index and field index) and volatility, and compute whether we can fast-path the access
+ // with IGET/IPUT. For fast-path fields, also retrieve the field offset.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirIFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved instance field lowering info.
+ explicit MirIFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile), // Without kFlagIsStatic.
+ field_offset_(0u) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kIFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+
+ friend class LocalValueNumberingTest;
+};
+
+class MirSFieldLoweringInfo : public MirFieldInfo {
+ public:
+ // For each requested static field, retrieve the field's declaring location (dex file, class
+ // index and field index) and volatility, and compute whether we can fast-path the access with
+ // SGET/SPUT. For fast-path fields (at least for SGET), retrieve the information needed for
+ // the field access, i.e. the field offset, whether the field is in the same class as the
+ // method being compiled, whether the declaring class can be safely assumed to be initialized,
+ // and the type index of the declaring class in the compiled method's dex file.
+ static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
+ MirSFieldLoweringInfo* field_infos, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved static field lowering info.
+ explicit MirSFieldLoweringInfo(uint16_t field_idx)
+ : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic),
+ field_offset_(0u),
+ storage_index_(DexFile::kDexNoIndex) {
+ }
+
+ bool FastGet() const {
+ return (flags_ & kFlagFastGet) != 0u;
+ }
+
+ bool FastPut() const {
+ return (flags_ & kFlagFastPut) != 0u;
+ }
+
+ bool IsReferrersClass() const {
+ return (flags_ & kFlagIsReferrersClass) != 0u;
+ }
+
+ bool IsInitialized() const {
+ return (flags_ & kFlagIsInitialized) != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ uint32_t StorageIndex() const {
+ return storage_index_;
+ }
+
+ private:
+ enum {
+ kBitFastGet = kFieldInfoBitEnd,
+ kBitFastPut,
+ kBitIsReferrersClass,
+ kBitIsInitialized,
+ kSFieldLoweringInfoBitEnd
+ };
+ COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
+ static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
+ static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
+ static constexpr uint16_t kFlagIsInitialized = 1u << kBitIsInitialized;
+
+ // The member offset of the field, 0u if unresolved.
+ MemberOffset field_offset_;
+ // The type index of the declaring class in the compiling method's dex file,
+ // -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
+ uint32_t storage_index_;
+
+ friend class LocalValueNumberingTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_MIR_FIELD_INFO_H_
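The header above packs the shared MirFieldInfo flags and the per-subclass flags into a single uint16_t by letting each subclass continue bit numbering from kFieldInfoBitEnd. A stripped-down, self-contained model of that layout follows; ToyFieldInfo and ToyIFieldInfo are illustrative names only, not part of this commit:

#include <cstdint>
#include <cstdio>

class ToyFieldInfo {
 protected:
  enum { kBitIsStatic = 0, kBitIsVolatile, kFieldInfoBitEnd };
  static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
  static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;

  explicit ToyFieldInfo(uint16_t flags) : flags_(flags) {}
  uint16_t flags_;
};

class ToyIFieldInfo : public ToyFieldInfo {
 public:
  // Like the real constructor, unresolved fields start out conservatively volatile.
  ToyIFieldInfo() : ToyFieldInfo(kFlagIsVolatile) {}

  bool IsVolatile() const { return (flags_ & kFlagIsVolatile) != 0u; }
  bool FastGet() const { return (flags_ & kFlagFastGet) != 0u; }

  // Roughly what Resolve() does once the field is found: rewrite all flags at once.
  void MarkResolved(bool is_volatile, bool fast_get, bool fast_put) {
    flags_ = (is_volatile ? kFlagIsVolatile : 0u) |
             (fast_get ? kFlagFastGet : 0u) |
             (fast_put ? kFlagFastPut : 0u);
  }

 private:
  // Subclass bits continue where the base class stopped, so they never collide.
  enum { kBitFastGet = kFieldInfoBitEnd, kBitFastPut, kIFieldInfoBitEnd };
  static_assert(kIFieldInfoBitEnd <= 16, "too many flags");
  static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
  static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
};

int main() {
  ToyIFieldInfo info;
  printf("%d %d\n", info.IsVolatile(), info.FastGet());  // 1 0 before resolution.
  info.MarkResolved(/*is_volatile=*/false, /*fast_get=*/true, /*fast_put=*/true);
  printf("%d %d\n", info.IsVolatile(), info.FastGet());  // 0 1 after resolution.
  return 0;
}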
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 2bfc154..46e854f 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -87,7 +87,9 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
compiler_temps_(arena, 6, kGrowableArrayMisc),
num_non_special_compiler_temps_(0),
max_available_non_special_compiler_temps_(0),
- punt_to_interpreter_(false) {
+ punt_to_interpreter_(false),
+ ifield_lowering_infos_(arena, 0u),
+ sfield_lowering_infos_(arena, 0u) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 28e9470..d4aafbc 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -20,6 +20,8 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "compiler_ir.h"
+#include "mir_field_info.h"
+#include "invoke_type.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
@@ -258,6 +260,12 @@ struct MIR {
MIR* throw_insn;
// Fused cmp branch condition.
ConditionCode ccode;
+ // IGET/IPUT lowering info index, points to MIRGraph::ifield_lowering_infos_. Due to the limit
+ // on code units per method (64K) and the 2-code-unit size of IGET/IPUT insns, this never exceeds 32K.
+ uint32_t ifield_lowering_info;
+ // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to the limit
+ // on code units per method (64K) and the 2-code-unit size of SGET/SPUT insns, this never exceeds 32K.
+ uint32_t sfield_lowering_info;
} meta;
};
@@ -466,6 +474,18 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ void DoCacheFieldLoweringInfo();
+
+ const MirIFieldLoweringInfo& GetIFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.Size());
+ return ifield_lowering_infos_.GetRawStorage()[mir->meta.ifield_lowering_info];
+ }
+
+ const MirSFieldLoweringInfo& GetSFieldLoweringInfo(MIR* mir) {
+ DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.Size());
+ return sfield_lowering_infos_.GetRawStorage()[mir->meta.sfield_lowering_info];
+ }
+
void InitRegLocations();
void RemapRegLocations();
@@ -923,6 +943,8 @@ class MIRGraph {
size_t max_available_non_special_compiler_temps_;
size_t max_available_special_compiler_temps_;
bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
+ GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
+ GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index b60f296..256bcb1 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -91,6 +91,7 @@ void PassDriver::CreatePasses() {
* - This is not yet an issue: no current pass would require it.
*/
static const Pass* const passes[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
GetPassInstance<CodeLayout>(),
GetPassInstance<SSATransformation>(),
GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index b0b8d1e..db7bdc8 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -78,11 +78,6 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
-bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
- return cu_->compiler_driver->ComputeInstanceFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
-}
-
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0533fbf..49e3c6f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -381,20 +381,14 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
const int r_base_;
};
-void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
@@ -407,7 +401,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized.
// TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -420,9 +414,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -437,7 +431,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -449,16 +443,16 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
} else {
rl_src = LoadValue(rl_src, kAnyReg);
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
+ StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.low_reg,
rl_src.high_reg);
} else {
- StoreWordDisp(r_base, field_offset, rl_src.low_reg);
+ StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.low_reg);
}
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -471,24 +465,18 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pSet32Static));
- CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+ CallRuntimeHelperImmRegLocation(setter_offset, field_info.FieldIndex(), rl_src, true);
}
}
-void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (field_info.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
@@ -497,7 +485,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -510,9 +498,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!field_info.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -527,7 +515,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ field_info.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -535,14 +523,14 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
}
// r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
+ LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
rl_result.high_reg, INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_offset, rl_result.low_reg);
+ LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.low_reg);
}
FreeTemp(r_base);
if (is_long_or_double) {
@@ -556,7 +544,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
:(is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pGet32Static));
- CallRuntimeHelperImm(getterOffset, field_idx, true);
+ CallRuntimeHelperImm(getterOffset, field_info.FieldIndex(), true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -698,18 +686,15 @@ void Mir2Lir::HandleThrowLaunchPads() {
}
}
-void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
-
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
+ if (field_info.FastGet() && !SLOW_FIELD_PATH) {
RegLocation rl_result;
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
@@ -717,17 +702,17 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
+ LoadBaseDispWide(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
rl_result.high_reg, rl_obj.s_reg_low);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
} else {
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_info.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
@@ -736,9 +721,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
+ LoadBaseDisp(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_result.low_reg,
kWord, rl_obj.s_reg_low);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
StoreValue(rl_dest, rl_result);
@@ -748,7 +733,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
- CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+ CallRuntimeHelperImmRegLocation(getterOffset, field_info.FieldIndex(), rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -759,39 +744,37 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
}
}
-void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
- if (fast_path && !SLOW_FIELD_PATH) {
+ const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
+ cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
+ if (field_info.FastPut() && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
- if (is_volatile) {
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_info.FieldOffset().Int32Value());
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- if (is_volatile) {
+ if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
- if (is_volatile) {
+ StoreBaseDisp(rl_obj.low_reg, field_info.FieldOffset().Int32Value(), rl_src.low_reg, kWord);
+ if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -803,7 +786,8 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
- CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+ CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_info.FieldIndex(),
+ rl_obj, rl_src, true);
}
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8c2ed36..00518bd 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -596,72 +596,72 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IGET_OBJECT:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
- GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
- GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_SHORT:
- GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
- GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
break;
case Instruction::IPUT_WIDE:
- GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
- GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_CHAR:
- GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_SHORT:
- GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::SGET_OBJECT:
- GenSget(vB, rl_dest, false, true);
+ GenSget(mir, rl_dest, false, true);
break;
case Instruction::SGET:
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT:
- GenSget(vB, rl_dest, false, false);
+ GenSget(mir, rl_dest, false, false);
break;
case Instruction::SGET_WIDE:
- GenSget(vB, rl_dest, true, false);
+ GenSget(mir, rl_dest, true, false);
break;
case Instruction::SPUT_OBJECT:
- GenSput(vB, rl_src[0], false, true);
+ GenSput(mir, rl_src[0], false, true);
break;
case Instruction::SPUT:
@@ -669,11 +669,11 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT:
- GenSput(vB, rl_src[0], false, false);
+ GenSput(mir, rl_src[0], false, false);
break;
case Instruction::SPUT_WIDE:
- GenSput(vB, rl_src[0], true, false);
+ GenSput(mir, rl_src[0], true, false);
break;
case Instruction::INVOKE_STATIC_RANGE:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8f199f8..e230c9d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -395,7 +395,6 @@ class Mir2Lir : public Backend {
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
void MarkSafepointPC(LIR* inst);
- bool FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile);
void SetupResourceMasks(LIR* lir);
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
@@ -558,13 +557,13 @@ class Mir2Lir : public Backend {
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
void GenFilledNewArray(CallInfo* info);
- void GenSput(uint32_t field_idx, RegLocation rl_src,
+ void GenSput(MIR* mir, RegLocation rl_src,
bool is_long_or_double, bool is_object);
- void GenSget(uint32_t field_idx, RegLocation rl_dest,
+ void GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object);
- void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
- void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
new file mode 100644
index 0000000..d401398
--- /dev/null
+++ b/compiler/driver/compiler_driver-inl.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+#define ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
+
+#include "compiler_driver.h"
+#include "dex/compiler_ir.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/art_field-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) {
+ return mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
+}
+
+inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
+ const DexCompilationUnit* mUnit) {
+ return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
+}
+
+inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ const DexFile::MethodId& referrer_method_id =
+ mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
+ mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
+ *mUnit->GetDexFile(), referrer_method_id.class_idx_, dex_cache, class_loader);
+ DCHECK_EQ(referrer_class == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(referrer_class == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ }
+ return referrer_class;
+}
+
+inline mirror::ArtField* CompilerDriver::ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static) {
+ DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
+ DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
+ *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
+ if (UNLIKELY(resolved_field == nullptr)) {
+    // Clean up any exception left by field resolution.
+ soa.Self()->ClearException();
+ return nullptr;
+ }
+ if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+ // ClassLinker can return a field of the wrong kind directly from the DexCache.
+ // Silently return nullptr on such incompatible class change.
+ return nullptr;
+ }
+ return resolved_field;
+}
+
+inline void CompilerDriver::GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
+ mirror::Class* declaring_class = resolved_field->GetDeclaringClass();
+ *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile();
+ *declaring_class_idx = declaring_class->GetDexTypeIndex();
+ *declaring_field_idx = resolved_field->GetDexFieldIndex();
+}
+
+inline bool CompilerDriver::IsFieldVolatile(mirror::ArtField* field) {
+ return field->IsVolatile();
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset) {
+ DCHECK(!resolved_field->IsStatic());
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ bool fast_get = referrer_class != nullptr &&
+ referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx);
+ bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class);
+ *field_offset = fast_get ? resolved_field->GetOffset() : MemberOffset(0u);
+ return std::make_pair(fast_get, fast_put);
+}
+
+inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized) {
+ DCHECK(resolved_field->IsStatic());
+ if (LIKELY(referrer_class != nullptr)) {
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ if (fields_class == referrer_class) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = fields_class->GetDexTypeIndex();
+      *is_referrers_class = true;  // implies no need to worry about class initialization
+ *is_initialized = true;
+ return std::make_pair(true, true);
+ }
+ if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache, field_idx)) {
+      // We have the resolved field; we must turn it into an index for the referrer
+      // in its static storage (which may fail if it doesn't have a slot for it).
+ // TODO: for images we can elide the static storage base null check
+ // if we know there's a non-null entry in the image
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint32_t storage_idx = DexFile::kDexNoIndex;
+ if (LIKELY(fields_class->GetDexCache() == dex_cache)) {
+        // Common case: the referrer and the field share the same dex cache, so there is
+        // no need to search the dex file.
+ storage_idx = fields_class->GetDexTypeIndex();
+ } else {
+        // Search the dex file for a localized ssb index; this may fail if the field's class
+        // is a parent of the class mentioned in the dex file and there is no dex cache entry.
+ const DexFile::StringId* string_id =
+ dex_file->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
+ if (string_id != nullptr) {
+ const DexFile::TypeId* type_id =
+ dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+ if (type_id != nullptr) {
+            // Medium path: needs a check that the static storage base is initialized.
+ storage_idx = dex_file->GetIndexForTypeId(*type_id);
+ }
+ }
+ }
+ if (storage_idx != DexFile::kDexNoIndex) {
+ *field_offset = resolved_field->GetOffset();
+ *storage_index = storage_idx;
+ *is_referrers_class = false;
+ *is_initialized = fields_class->IsInitialized() &&
+ CanAssumeTypeIsPresentInDexCache(*dex_file, storage_idx);
+ return std::make_pair(true, !resolved_field->IsFinal());
+ }
+ }
+ }
+ // Conservative defaults.
+ *field_offset = MemberOffset(0u);
+ *storage_index = DexFile::kDexNoIndex;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ return std::make_pair(false, false);
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
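
The two IsFast*Field helpers above return a std::pair<bool, bool> where .first answers the get (IGET/SGET) question and .second the put (IPUT/SPUT) question; the put side is stricter because writes to final fields are only fast from the declaring class. Below is a minimal sketch of how a caller is expected to consume that pair, mirroring the is_put ? fast_path.second : fast_path.first selection used in compiler_driver.cc further down; SelectFastPath is an illustrative helper, not part of the driver.

#include <utility>

// Picks the relevant half of the (fast_get, fast_put) pair for the access being compiled.
static bool SelectFastPath(bool is_put, const std::pair<bool, bool>& fast_path) {
  return is_put ? fast_path.second : fast_path.first;
}

// Example: a final instance field that is accessible but declared in another class yields
// {true, false}, so a put falls back to the slow path while a get stays on the fast path.
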
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 708cce6..501ea7c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,6 +26,7 @@
#include "base/timing_logger.h"
#include "class_linker.h"
#include "compiler_backend.h"
+#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
#include "dex/verification_results.h"
@@ -901,6 +902,24 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
}
}
+void CompilerDriver::ProcessedInstanceField(bool resolved) {
+ if (!resolved) {
+ stats_->UnresolvedInstanceField();
+ } else {
+ stats_->ResolvedInstanceField();
+ }
+}
+
+void CompilerDriver::ProcessedStaticField(bool resolved, bool local) {
+ if (!resolved) {
+ stats_->UnresolvedStaticField();
+ } else if (local) {
+ stats_->ResolvedLocalStaticField();
+ } else {
+ stats_->ResolvedStaticField();
+ }
+}
+
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
@@ -918,15 +937,6 @@ static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
dex_cache, class_loader);
}
-static mirror::ArtField* ComputeFieldReferencedFromCompilingMethod(
- ScopedObjectAccess& soa, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- return mUnit->GetClassLinker()->ResolveField(*mUnit->GetDexFile(), field_idx, dex_cache,
- class_loader, is_static);
-}
-
static mirror::ArtMethod* ComputeMethodReferencedFromCompilingMethod(ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit,
uint32_t method_idx,
@@ -962,117 +972,80 @@ bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
}
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, bool* is_volatile) {
+ bool is_put, MemberOffset* field_offset,
+ bool* is_volatile) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *is_volatile = true;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie is static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, false);
- if (resolved_field != NULL && !resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal() &&
- fields_class != referrer_class;
- if (access_ok && !is_write_to_final_from_wrong_class) {
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedInstanceField();
- return true; // Fast path.
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+  // Try to resolve the field and the compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedInstanceField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastInstanceField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ }
+ ProcessedInstanceField(result);
+ return result;
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, int* storage_index,
- bool* is_referrers_class, bool* is_volatile,
- bool* is_initialized) {
+ bool is_put, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class,
+ bool* is_volatile, bool* is_initialized) {
ScopedObjectAccess soa(Thread::Current());
- // Conservative defaults.
- *field_offset = -1;
- *storage_index = -1;
- *is_referrers_class = false;
- *is_volatile = true;
- *is_initialized = false;
- // Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
- mirror::ArtField* resolved_field =
- ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx, true);
- if (resolved_field != NULL && resolved_field->IsStatic()) {
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), resolved_field->GetDeclaringClass()->GetDexCache());
- mirror::Class* referrer_class =
- ComputeCompilingMethodsClass(soa, dex_cache, mUnit);
- if (referrer_class != NULL) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- if (fields_class == referrer_class) {
- *is_referrers_class = true; // implies no worrying about class initialization
- *is_initialized = true;
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- stats_->ResolvedLocalStaticField();
- return true; // fast path
- } else {
- bool access_ok = referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache.get(), field_idx);
- bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
- if (access_ok && !is_write_to_final_from_wrong_class) {
- // We have the resolved field, we must make it into a index for the referrer
- // in its static storage (which may fail if it doesn't have a slot for it)
- // TODO: for images we can elide the static storage base null check
- // if we know there's a non-null entry in the image
- mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
- if (fields_class->GetDexCache() == dex_cache) {
- // common case where the dex cache of both the referrer and the field are the same,
- // no need to search the dex file
- *storage_index = fields_class->GetDexTypeIndex();
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- // Search dex file for localized ssb index, may fail if field's class is a parent
- // of the class mentioned in the dex file and there is no dex cache entry.
- const DexFile::StringId* string_id =
- mUnit->GetDexFile()->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
- if (string_id != NULL) {
- const DexFile::TypeId* type_id =
- mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
- if (type_id != NULL) {
- // medium path, needs check of static storage base being initialized
- *storage_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
- *field_offset = resolved_field->GetOffset().Int32Value();
- *is_volatile = resolved_field->IsVolatile();
- *is_initialized = fields_class->IsInitialized() &&
- CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
- stats_->ResolvedStaticField();
- return true;
- }
- }
- }
- }
- }
- }
- // Clean up any exception left by field/type resolution
- if (soa.Self()->IsExceptionPending()) {
- soa.Self()->ClearException();
+  // Try to resolve the field and the compiling method's class.
+ mirror::ArtField* resolved_field;
+ mirror::Class* referrer_class;
+ mirror::DexCache* dex_cache;
+ {
+ SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
+ mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
+ ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
+ referrer_class = (resolved_field_sirt.get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
+ resolved_field = resolved_field_sirt.get();
+ dex_cache = dex_cache_sirt.get();
}
- stats_->UnresolvedStaticField();
- return false; // Incomplete knowledge needs slow path.
+ bool result = false;
+ if (resolved_field != nullptr && referrer_class != nullptr) {
+ *is_volatile = IsFieldVolatile(resolved_field);
+ std::pair<bool, bool> fast_path = IsFastStaticField(
+ dex_cache, referrer_class, resolved_field, field_idx, field_offset,
+ storage_index, is_referrers_class, is_initialized);
+ result = is_put ? fast_path.second : fast_path.first;
+ }
+ if (!result) {
+ // Conservative defaults.
+ *is_volatile = true;
+ *field_offset = MemberOffset(static_cast<size_t>(-1));
+ *storage_index = -1;
+ *is_referrers_class = false;
+ *is_initialized = false;
+ }
+ ProcessedStaticField(result, *is_referrers_class);
+ return result;
}
void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
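
The rewritten ComputeInstanceFieldInfo/ComputeStaticFieldInfo keep the same conservative defaults on the slow path, but the offset now travels as a MemberOffset instead of a plain int. The stand-alone sketch below uses a simplified stand-in for art::MemberOffset (assumed to wrap a size_t and expose Int32Value()) to show why MemberOffset(static_cast<size_t>(-1)) still reads back as -1, so the DCHECK_GE(field_offset.Int32Value(), 0) guards in gbc_expander.cc below keep rejecting slow-path values.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Simplified stand-in for art::MemberOffset: a size_t wrapper with an Int32Value() accessor.
class MemberOffset {
 public:
  explicit MemberOffset(size_t value) : value_(value) {}
  int32_t Int32Value() const { return static_cast<int32_t>(value_); }
 private:
  size_t value_;
};

int main() {
  MemberOffset unresolved(static_cast<size_t>(-1));
  assert(unresolved.Int32Value() == -1);  // the conservative default still looks like -1
  MemberOffset resolved(16u);             // hypothetical fast-path offset
  assert(resolved.Int32Value() >= 0);     // fast paths always carry a non-negative offset
  return 0;
}
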
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 6ccbf0f..57c2908 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -51,6 +51,7 @@ class DexFileToMethodInlinerMap;
struct InlineIGetIPutData;
class OatWriter;
class ParallelCompilationManager;
+class ScopedObjectAccess;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -203,6 +204,53 @@ class CompilerDriver {
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr);
+  // Get the DexCache for the DexCompilationUnit.
+ mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve compiling method's class. Returns nullptr on failure.
+ mirror::Class* ResolveCompilingMethodsClass(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
+ mirror::ArtField* ResolveField(
+ ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
+ const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ uint32_t field_idx, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get declaration location of a resolved field.
+ void GetResolvedFieldDexFileLocation(
+ mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
+ uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
+ std::pair<bool, bool> IsFastInstanceField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the field offset,
+ // the type index of the declaring class in the referrer's dex file and whether the declaring
+ // class is the referrer's class or at least can be assumed to be initialized.
+ std::pair<bool, bool> IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtField* resolved_field, uint16_t field_idx, MemberOffset* field_offset,
+ uint32_t* storage_index, bool* is_referrers_class, bool* is_initialized)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void ProcessedInstanceField(bool resolved);
+ void ProcessedStaticField(bool resolved, bool local);
+
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
@@ -212,13 +260,13 @@ class CompilerDriver {
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, bool* is_volatile)
+ MemberOffset* field_offset, bool* is_volatile)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Can we fastpath static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, int* storage_index,
+ MemberOffset* field_offset, uint32_t* storage_index,
bool* is_referrers_class, bool* is_volatile, bool* is_initialized)
LOCKS_EXCLUDED(Locks::mutator_lock_);
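
GetResolvedFieldDexFileLocation() hands back the declaring dex file together with the 16-bit declaring class and field indices, which is the kind of compact key a MIR-level cache can store instead of a raw mirror::ArtField pointer. A hedged, self-contained sketch of such a key follows; FieldKey is illustrative only, not necessarily the layout the compiler actually caches.

#include <cstdint>
#include <tuple>

// Illustrative key built from the three outputs of GetResolvedFieldDexFileLocation().
struct FieldKey {
  const void* declaring_dex_file;  // stands in for const DexFile*
  uint16_t declaring_class_idx;
  uint16_t declaring_field_idx;

  bool operator==(const FieldKey& other) const {
    return std::tie(declaring_dex_file, declaring_class_idx, declaring_field_idx) ==
           std::tie(other.declaring_dex_file, other.declaring_class_idx, other.declaring_field_idx);
  }
};
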
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index 8f22a97..cf28db3 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "dex_file.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
@@ -1602,7 +1603,7 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
llvm::Value* field_value;
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
@@ -1633,12 +1634,12 @@ llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
field_value = irb_.CreateBitCast(field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1664,7 +1665,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
- int field_offset;
+ art::MemberOffset field_offset(0u);
bool is_volatile;
bool is_fast_path = driver_->ComputeInstanceFieldInfo(
field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
@@ -1698,7 +1699,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
if (is_volatile) {
irb_.CreateMemoryBarrier(art::kStoreStore);
@@ -1707,7 +1708,7 @@ void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
llvm::PointerType* field_type =
irb_.getJType(field_jty)->getPointerTo();
- llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* field_addr =
irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
@@ -1875,8 +1876,8 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1913,7 +1914,7 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
static_field_value = irb_.CreateBitCast(static_field_value, irb_.getJType(field_jty));
}
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -1929,11 +1930,11 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
@@ -1960,8 +1961,8 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
new_value = irb_.CreateBitCast(new_value, irb_.getJType(field_jty));
}
- int field_offset;
- int ssb_index;
+ art::MemberOffset field_offset(0u);
+ uint32_t ssb_index;
bool is_referrers_class;
bool is_volatile;
bool is_initialized;
@@ -1999,7 +2000,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
EmitGuard_ExceptionLandingPad(dex_pc);
} else {
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(field_offset.Int32Value(), 0);
llvm::Value* static_storage_addr = NULL;
@@ -2015,7 +2016,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
} else {
// Medium path, static storage base in a different class which
// requires checks that the other class is initialized
- DCHECK_GE(ssb_index, 0);
+ DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
}
@@ -2023,7 +2024,7 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
irb_.CreateMemoryBarrier(art::kStoreStore);
}
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
+ llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
llvm::Value* static_field_addr =
irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
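
In the gbc_expander changes, ssb_index switches from a signed int with -1 meaning "no storage slot" to an unsigned uint32_t, so the guards change from DCHECK_GE(ssb_index, 0) to DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex). The stand-alone sketch below assumes kDexNoIndex is the all-ones 32-bit value (as in ART) and shows that the old -1 default converts to the same sentinel.

#include <cassert>
#include <cstdint>

// Assumption: matches art::DexFile::kDexNoIndex, the all-ones 32-bit sentinel.
static const uint32_t kDexNoIndex = 0xFFFFFFFFu;

int main() {
  uint32_t ssb_index = static_cast<uint32_t>(-1);  // the old signed default, reinterpreted
  assert(ssb_index == kDexNoIndex);                // the sentinel check replaces the >= 0 check
  return 0;
}
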