summaryrefslogtreecommitdiffstats
path: root/compiler
diff options
context:
space:
mode:
authorVladimir Marko <vmarko@google.com>2014-01-29 10:31:55 +0000
committerVladimir Marko <vmarko@google.com>2014-02-20 11:26:18 +0000
commit7f6cf56942c8469958b273ea968db253051c5b05 (patch)
tree8df31f196572acea32513083f98c1f387e581915 /compiler
parent3188d117d6f1ba5f3a30d0ff231d816ebb59a7f7 (diff)
downloadart-7f6cf56942c8469958b273ea968db253051c5b05.zip
art-7f6cf56942c8469958b273ea968db253051c5b05.tar.gz
art-7f6cf56942c8469958b273ea968db253051c5b05.tar.bz2
Annotate used fields.
Annotate all fields used by a method early during the compilation, check access rights and record field offset, volatility, etc. Use these annotations when generating code for IGET/IPUT/SGET/SPUT instructions. Change-Id: I4bbf5cca4fecf53c9bf9c93ac1793e2f40c16b5f
Diffstat (limited to 'compiler')
-rw-r--r--compiler/Android.mk1
-rw-r--r--compiler/dex/bb_optimizations.h14
-rw-r--r--compiler/dex/mir_analysis.cc110
-rw-r--r--compiler/dex/mir_annotations.cc195
-rw-r--r--compiler/dex/mir_annotations.h215
-rw-r--r--compiler/dex/mir_graph.cc4
-rw-r--r--compiler/dex/mir_graph.h22
-rw-r--r--compiler/dex/pass_driver.cc1
-rw-r--r--compiler/dex/quick/gen_common.cc124
-rw-r--r--compiler/dex/quick/mir_to_lir.cc36
-rw-r--r--compiler/dex/quick/mir_to_lir.h8
-rw-r--r--compiler/driver/compiler_driver.cc18
-rw-r--r--compiler/driver/compiler_driver.h3
13 files changed, 658 insertions, 93 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 77dc367..12b8afb 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -50,6 +50,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
+ dex/mir_annotations.cc \
dex/mir_dataflow.cc \
dex/mir_optimization.cc \
dex/pass_driver.cc \
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1286a8e..f336231 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -23,6 +23,20 @@
namespace art {
/**
+ * @class AnnotateUsedFields
+ * @brief Perform the annotation of fields for IGET/IPUT/SGET/SPUT insns.
+ */
+class AnnotateUsedFields : public Pass {
+ public:
+ AnnotateUsedFields() : Pass("AnnotateInstanceFields", kNoNodes) {
+ }
+
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->DoAnnotateUsedFields();
+ }
+};
+
+/**
* @class CodeLayout
* @brief Perform the code layout pass.
*/
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index ab55333..b739d5f 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -14,10 +14,14 @@
* limitations under the License.
*/
+#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "UniquePtr.h"
namespace art {
@@ -1082,4 +1086,110 @@ bool MIRGraph::SkipCompilation(Runtime::CompilerFilter compiler_filter) {
return ComputeSkipCompilation(&stats, skip_compilation);
}
+void MIRGraph::DoAnnotateUsedFields() {
+ // Try to use stack-allocated array, resort to heap if we exceed the initial size.
+ static constexpr size_t kInitialSize = 32;
+ uint16_t stack_idxs[kInitialSize];
+ UniquePtr<uint16_t[]> allocated_idxs;
+ uint16_t* field_idxs = stack_idxs;
+ size_t size = kInitialSize;
+
+ // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
+ size_t ifield_pos = 0u;
+ size_t sfield_pos = size;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->block_type != kDalvikByteCode) {
+ continue;
+ }
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::IGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ bool need_alloc = false;
+ const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
+ uint16_t field_idx;
+ // Get field index and try to find it among existing indexes. If found, it's usually among
+ // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
+ // is a linear search, it actually performs much better than map based approach.
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idx = insn->VRegC_22c();
+ size_t i = ifield_pos;
+ while (i != 0u && field_idxs[i - 1] != field_idx) {
+ --i;
+ }
+ if (i != 0u) {
+ mir->meta.ifield_annotation = i - 1;
+ } else {
+ mir->meta.ifield_annotation = ifield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[ifield_pos++] = field_idx;
+ }
+ }
+ } else {
+ field_idx = insn->VRegB_21c();
+ size_t i = sfield_pos;
+ while (i != size && field_idxs[i] != field_idx) {
+ ++i;
+ }
+ if (i != size) {
+ mir->meta.sfield_annotation = size - i - 1u;
+ } else {
+ mir->meta.sfield_annotation = size - sfield_pos;
+ if (UNLIKELY(ifield_pos == sfield_pos)) {
+ need_alloc = true;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ }
+ if (UNLIKELY(need_alloc)) {
+ DCHECK(field_idxs == stack_idxs);
+ // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
+ uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+ allocated_idxs.reset(new uint16_t[max_refs]);
+ field_idxs = allocated_idxs.get();
+ size_t sfield_count = size - sfield_pos;
+ sfield_pos = max_refs - sfield_count;
+ size = max_refs;
+ memcpy(field_idxs, stack_idxs, ifield_pos * sizeof(field_idxs[0]));
+ memcpy(field_idxs + sfield_pos, stack_idxs + ifield_pos,
+ sfield_count * sizeof(field_idxs[0]));
+ if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
+ field_idxs[ifield_pos++] = field_idx;
+ } else {
+ field_idxs[--sfield_pos] = field_idx;
+ }
+ }
+ DCHECK_LE(ifield_pos, sfield_pos);
+ }
+ }
+ }
+
+ if (ifield_pos != 0u) {
+ // Annotate instance fields.
+ DCHECK_EQ(ifield_annotations_.Size(), 0u);
+ ifield_annotations_.Resize(ifield_pos);
+ for (size_t pos = 0u; pos != ifield_pos; ++pos) {
+ ifield_annotations_.Insert(IFieldAnnotation(field_idxs[pos]));
+ }
+ IFieldAnnotation::Resolve(GetCurrentDexCompilationUnit(),
+ ifield_annotations_.GetRawStorage(), ifield_pos);
+ }
+
+ if (sfield_pos != size) {
+ // Annotate static fields.
+ DCHECK_EQ(sfield_annotations_.Size(), 0u);
+ sfield_annotations_.Resize(size - sfield_pos);
+ for (size_t pos = size; pos != sfield_pos;) {
+ --pos;
+ sfield_annotations_.Insert(SFieldAnnotation(field_idxs[pos]));
+ }
+ SFieldAnnotation::Resolve(GetCurrentDexCompilationUnit(),
+ sfield_annotations_.GetRawStorage(),
+ size - sfield_pos);
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/mir_annotations.cc b/compiler/dex/mir_annotations.cc
new file mode 100644
index 0000000..eba64d7
--- /dev/null
+++ b/compiler/dex/mir_annotations.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_annotations.h"
+
+#include <string.h>
+
+#include "base/logging.h"
+#include "class_linker.h"
+#include "compiler_ir.h"
+#include "driver/dex_compilation_unit.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+void IFieldAnnotation::Resolve(const DexCompilationUnit* mUnit,
+ IFieldAnnotation* annotations, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(annotations != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = annotations, end = annotations + count; it != end; ++it) {
+ IFieldAnnotation unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ const DexFile* dex_file = mUnit->GetDexFile();
+ ClassLinker* class_linker = mUnit->GetClassLinker();
+ uint32_t referrer_class_idx = dex_file->GetMethodId(mUnit->GetDexMethodIndex()).class_idx_;
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(*dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(
+ soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ class_linker->ResolveType(*dex_file, referrer_class_idx, dex_cache, class_loader));
+ if (UNLIKELY(referrer_class.get() == nullptr)) {
+ // Clean up any exception left by type resolution
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ // We're compiling a method without class definition. We may still resolve fields
+ // and update annotations, so fall through and check again in the loop.
+ }
+
+ for (auto it = annotations, end = annotations + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ class_linker->ResolveField(*dex_file, field_idx, dex_cache, class_loader, false);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ continue;
+ }
+ DCHECK(!soa.Self()->IsExceptionPending());
+ if (UNLIKELY(resolved_field->IsStatic())) {
+ continue;
+ }
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ it->is_volatile_ = resolved_field->IsVolatile() ? 1u : 0u;
+ it->field_offset_ = resolved_field->GetOffset();
+ it->declaring_dex_file_ = fields_class->GetDexCache()->GetDexFile();
+ it->declaring_class_idx_ = fields_class->GetDexTypeIndex();
+ it->declaring_field_idx_ = resolved_field->GetDexFieldIndex();
+ if (UNLIKELY(referrer_class.get() == nullptr)) {
+ continue;
+ }
+ if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache.get(), field_idx)) {
+ it->fast_get_ = 1u;
+ if (!resolved_field->IsFinal() || fields_class == referrer_class.get()) {
+ it->fast_put_ = 1u;
+ }
+ }
+ }
+}
+
+void SFieldAnnotation::Resolve(const DexCompilationUnit* mUnit,
+ SFieldAnnotation* annotations, size_t count) {
+ if (kIsDebugBuild) {
+ DCHECK(annotations != nullptr);
+ DCHECK_NE(count, 0u);
+ for (auto it = annotations, end = annotations + count; it != end; ++it) {
+ SFieldAnnotation unresolved(it->field_idx_);
+ DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+ }
+ }
+
+ const DexFile* dex_file = mUnit->GetDexFile();
+ ClassLinker* class_linker = mUnit->GetClassLinker();
+ uint32_t referrer_class_idx = dex_file->GetMethodId(mUnit->GetDexMethodIndex()).class_idx_;
+
+ // We're going to resolve fields and check access in a tight loop. It's better to hold
+ // the lock and needed references once than re-acquiring them again and again.
+ ScopedObjectAccess soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(*dex_file));
+ SirtRef<mirror::ClassLoader> class_loader(
+ soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ SirtRef<mirror::Class> referrer_class(soa.Self(),
+ class_linker->ResolveType(*dex_file, referrer_class_idx, dex_cache, class_loader));
+ if (UNLIKELY(referrer_class.get() == nullptr)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ // We're compiling a method without class definition. We may still resolve fields
+ // and update annotations, so fall through and check again in the loop.
+ }
+
+ for (auto it = annotations, end = annotations + count; it != end; ++it) {
+ uint32_t field_idx = it->field_idx_;
+ mirror::ArtField* resolved_field =
+ class_linker->ResolveField(*dex_file, field_idx, dex_cache, class_loader, true);
+ if (UNLIKELY(resolved_field == nullptr)) {
+ // Clean up the exception left by field resolution
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
+ continue;
+ }
+ DCHECK(!soa.Self()->IsExceptionPending());
+ if (UNLIKELY(!resolved_field->IsStatic())) {
+ continue;
+ }
+ mirror::Class* fields_class = resolved_field->GetDeclaringClass();
+ it->is_volatile_ = resolved_field->IsVolatile() ? 1u : 0u;
+ it->field_offset_ = resolved_field->GetOffset();
+ it->declaring_dex_file_ = fields_class->GetDexCache()->GetDexFile();
+ it->declaring_class_idx_ = fields_class->GetDexTypeIndex();
+ it->declaring_field_idx_ = resolved_field->GetDexFieldIndex();
+ if (UNLIKELY(referrer_class.get() == nullptr)) {
+ continue;
+ }
+ if (fields_class == referrer_class.get()) {
+ it->fast_get_ = 1u;
+ it->fast_put_ = 1u;
+ it->is_referrers_class_ = 1u; // implies no worrying about class initialization
+ it->is_initialized_ = 1u;
+ it->storage_index_ = fields_class->GetDexTypeIndex();
+ continue;
+ }
+ if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
+ dex_cache.get(), field_idx)) {
+ // We have the resolved field, we must make it into an index for the referrer
+ // in its static storage (which may fail if it doesn't have a slot for it)
+ // TODO: for images we can elide the static storage base null check
+ // if we know there's a non-null entry in the image
+ if (LIKELY(fields_class->GetDexCache() == dex_cache.get())) {
+ // common case where the dex cache of both the referrer and the field are the same,
+ // no need to search the dex file
+ it->storage_index_ = fields_class->GetDexTypeIndex();
+ } else {
+ // Search dex file for localized ssb index, may fail if field's class is a parent
+ // of the class mentioned in the dex file and there is no dex cache entry.
+ const DexFile::StringId* string_id =
+ dex_file->FindStringId(FieldHelper(resolved_field).GetDeclaringClassDescriptor());
+ if (string_id == nullptr) {
+ continue;
+ }
+ const DexFile::TypeId* type_id =
+ dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
+ if (type_id == nullptr) {
+ continue;
+ }
+ // medium path, needs check of static storage base being initialized
+ it->storage_index_ = dex_file->GetIndexForTypeId(*type_id);
+ }
+ it->fast_get_ = 1u;
+ it->fast_put_ = resolved_field->IsFinal() ? 0u : 1u;
+ DCHECK_EQ(it->is_referrers_class_, 0u);
+ it->is_initialized_ = fields_class->IsInitialized() &&
+ mUnit->GetCompilationUnit()->compiler_driver->CanAssumeTypeIsPresentInDexCache(
+ *dex_file, it->storage_index_);
+ }
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/mir_annotations.h b/compiler/dex/mir_annotations.h
new file mode 100644
index 0000000..85761de
--- /dev/null
+++ b/compiler/dex/mir_annotations.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_MIR_ANNOTATIONS_H_
+#define ART_COMPILER_DEX_MIR_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+namespace art {
+
+class DexCompilationUnit;
+
+/*
+ * Annotations are calculated from the perspective of the compilation unit that
+ * accesses the fields or methods. Since they are stored with that unit, they do not
+ * need to reference the dex file or method for which they have been calculated.
+ * However, we do store the dex file, declaring class index and field index of the
+ * resolved field to help distinguish between fields.
+ */
+
+class IFieldAnnotation {
+ public:
+ // For each requested instance field compute whether we can fast path the access with IGET/IPUT.
+ // If yes (at least for IGET), computes the offset and volatility.
+ static void Resolve(const DexCompilationUnit* mUnit, IFieldAnnotation* annotations, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved instance field annotation.
+ explicit IFieldAnnotation(uint16_t field_idx)
+ : field_idx_(field_idx),
+ fast_get_(0u),
+ fast_put_(0u),
+ is_volatile_(1u),
+ reserved_(0u),
+ field_offset_(0u),
+ declaring_dex_file_(nullptr),
+ declaring_class_idx_(0u),
+ declaring_field_idx_(0u) {
+ }
+
+ uint16_t FieldIndex() const {
+ return field_idx_;
+ }
+
+ bool FastGet() const {
+ return fast_get_ != 0u;
+ }
+
+ bool FastPut() const {
+ return fast_put_ != 0u;
+ }
+
+ bool IsVolatile() const {
+ return is_volatile_ != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringFieldIndex() const {
+ return declaring_field_idx_;
+ }
+
+ private:
+ // The field index in the compiling method's dex file.
+ uint16_t field_idx_;
+ // Can the compiling method fast-path IGET from this field?
+ uint16_t fast_get_ : 1;
+ // Can the compiling method fast-path IPUT to this field?
+ uint16_t fast_put_ : 1;
+ // Is the field volatile? Unknown if unresolved, so treated as volatile.
+ uint16_t is_volatile_ : 1;
+ // Reserved.
+ uint16_t reserved_ : 13;
+ // The member offset of the field, MemberOffset(static_cast<size_t>(-1)) if unresolved.
+ MemberOffset field_offset_;
+ // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+ // The type index of the class declaring the field, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The field index in the dex file that defines the field, 0 if unresolved.
+ uint16_t declaring_field_idx_;
+};
+
+class SFieldAnnotation {
+ public:
+ // For each requested static field compute whether we can fast path the access with SGET/SPUT.
+ // If yes (at least for SGET), computes the offset and volatility, storage index, and whether
+ // the access is from the same class or the class can be assumed initialized.
+ static void Resolve(const DexCompilationUnit* mUnit, SFieldAnnotation* annotations, size_t count)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ // Construct an unresolved static field annotation.
+ explicit SFieldAnnotation(uint16_t field_idx)
+ : field_idx_(field_idx),
+ fast_get_(0u),
+ fast_put_(0u),
+ is_volatile_(1u),
+ is_referrers_class_(0u),
+ is_initialized_(0u),
+ reserved_(0u),
+ field_offset_(0u),
+ storage_index_(DexFile::kDexNoIndex),
+ declaring_dex_file_(nullptr),
+ declaring_class_idx_(0u),
+ declaring_field_idx_(0u) {
+ }
+
+ uint16_t FieldIndex() const {
+ return field_idx_;
+ }
+
+ bool FastGet() const {
+ return fast_get_ != 0u;
+ }
+
+ bool FastPut() const {
+ return fast_put_ != 0u;
+ }
+
+ bool IsVolatile() const {
+ return is_volatile_ != 0u;
+ }
+
+ bool IsReferrersClass() const {
+ return is_referrers_class_ != 0u;
+ }
+
+ bool IsInitialized() const {
+ return is_initialized_ != 0u;
+ }
+
+ MemberOffset FieldOffset() const {
+ return field_offset_;
+ }
+
+ uint32_t StorageIndex() const {
+ return storage_index_;
+ }
+
+ bool IsResolved() const {
+ return declaring_dex_file_ != nullptr;
+ }
+
+ const DexFile* DeclaringDexFile() const {
+ return declaring_dex_file_;
+ }
+
+ uint16_t DeclaringClassIndex() const {
+ return declaring_class_idx_;
+ }
+
+ uint16_t DeclaringFieldIndex() const {
+ return declaring_field_idx_;
+ }
+
+ private:
+ // The field index in the compiling method's dex file.
+ uint16_t field_idx_;
+ // Can the compiling method fast-path SGET from this field?
+ uint16_t fast_get_ : 1;
+ // Can the compiling method fast-path SPUT to this field?
+ uint16_t fast_put_ : 1;
+ // Is the field volatile? Unknown if unresolved, so treated as volatile (true).
+ uint16_t is_volatile_ : 1;
+ // Is the field in the referrer's class? false if unresolved.
+ uint16_t is_referrers_class_ : 1;
+ // Can we assume that the field's class is already initialized? false if unresolved.
+ uint16_t is_initialized_ : 1;
+ // Reserved.
+ uint16_t reserved_ : 11;
+ // The member offset of the field, static_cast<size_t>(-1) if unresolved.
+ MemberOffset field_offset_;
+ // The type index of the declaring class in the compiling method's dex file,
+ // -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
+ uint32_t storage_index_;
+ // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ const DexFile* declaring_dex_file_;
+ // The type index of the class declaring the field, 0 if unresolved.
+ uint16_t declaring_class_idx_;
+ // The field index in the dex file that defines the field, 0 if unresolved.
+ uint16_t declaring_field_idx_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_MIR_ANNOTATIONS_H_
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e4550d1..39f2d0e 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,7 +86,9 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
forward_branches_(0),
compiler_temps_(arena, 6, kGrowableArrayMisc),
num_non_special_compiler_temps_(0),
- max_available_non_special_compiler_temps_(0) {
+ max_available_non_special_compiler_temps_(0),
+ ifield_annotations_(arena, 0u),
+ sfield_annotations_(arena, 0u) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d304db9..ea0289b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -22,6 +22,8 @@
#include "compiler_ir.h"
#include "arena_bit_vector.h"
#include "utils/growable_array.h"
+#include "invoke_type.h"
+#include "mir_annotations.h"
namespace art {
@@ -258,6 +260,12 @@ struct MIR {
MIR* throw_insn;
// Fused cmp branch condition.
ConditionCode ccode;
+ // IGET/IPUT annotation index, points to MIRGraph::ifield_annotations_. Due to limit on the
+ // number of code points (64K) and size of IGET/IPUT insn (2), this will never exceed 32K.
+ uint32_t ifield_annotation;
+ // SGET/SPUT annotation index, points to MIRGraph::sfield_annotations_. Due to limit on the
+ // number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
+ uint32_t sfield_annotation;
} meta;
};
@@ -466,6 +474,18 @@ class MIRGraph {
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ void DoAnnotateUsedFields();
+
+ const IFieldAnnotation& GetIFieldAnnotation(MIR* mir) {
+ DCHECK_LT(mir->meta.ifield_annotation, ifield_annotations_.Size());
+ return ifield_annotations_.GetRawStorage()[mir->meta.ifield_annotation];
+ }
+
+ const SFieldAnnotation& GetSFieldAnnotation(MIR* mir) {
+ DCHECK_LT(mir->meta.sfield_annotation, sfield_annotations_.Size());
+ return sfield_annotations_.GetRawStorage()[mir->meta.sfield_annotation];
+ }
+
void InitRegLocations();
void RemapRegLocations();
@@ -917,6 +937,8 @@ class MIRGraph {
size_t num_non_special_compiler_temps_;
size_t max_available_non_special_compiler_temps_;
size_t max_available_special_compiler_temps_;
+ GrowableArray<IFieldAnnotation> ifield_annotations_;
+ GrowableArray<SFieldAnnotation> sfield_annotations_;
};
} // namespace art
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 4f8739a..4302fe2 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -91,6 +91,7 @@ void PassDriver::CreatePasses() {
* - This is not yet an issue: no current pass would require it.
*/
static const Pass* const passes[] = {
+ GetPassInstance<AnnotateUsedFields>(),
GetPassInstance<CodeLayout>(),
GetPassInstance<SSATransformation>(),
GetPassInstance<ConstantPropagation>(),
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 0533fbf..a0a83db 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -381,20 +381,14 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
const int r_base_;
};
-void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const SFieldAnnotation& annotation = mir_graph_->GetSFieldAnnotation(mir);
+ cu_->compiler_driver->ProcessedStaticField(annotation.FastPut(), annotation.IsReferrersClass());
+ if (annotation.FastPut() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (annotation.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
@@ -407,7 +401,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized.
// TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(annotation.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -420,9 +414,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * annotation.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!annotation.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -437,7 +431,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ annotation.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -449,16 +443,16 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
} else {
rl_src = LoadValue(rl_src, kAnyReg);
}
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
+ StoreBaseDispWide(r_base, annotation.FieldOffset().Int32Value(), rl_src.low_reg,
rl_src.high_reg);
} else {
- StoreWordDisp(r_base, field_offset, rl_src.low_reg);
+ StoreWordDisp(r_base, annotation.FieldOffset().Int32Value(), rl_src.low_reg);
}
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -471,24 +465,18 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pSet32Static));
- CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
+ CallRuntimeHelperImmRegLocation(setter_offset, annotation.FieldIndex(), rl_src, true);
}
}
-void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
- int field_offset;
- int storage_index;
- bool is_volatile;
- bool is_referrers_class;
- bool is_initialized;
- bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
- field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
- &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
- if (fast_path && !SLOW_FIELD_PATH) {
- DCHECK_GE(field_offset, 0);
+ const SFieldAnnotation& annotation = mir_graph_->GetSFieldAnnotation(mir);
+ cu_->compiler_driver->ProcessedStaticField(annotation.FastGet(), annotation.IsReferrersClass());
+ if (annotation.FastGet() && !SLOW_FIELD_PATH) {
+ DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
int r_base;
- if (is_referrers_class) {
+ if (annotation.IsReferrersClass()) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
@@ -497,7 +485,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
- DCHECK_GE(storage_index, 0);
+ DCHECK_NE(annotation.StorageIndex(), DexFile::kDexNoIndex);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
@@ -510,9 +498,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
r_base);
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * storage_index, r_base);
+ sizeof(int32_t*) * annotation.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!is_initialized) {
+ if (!annotation.IsInitialized()) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -527,7 +515,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
AddSlowPath(new (arena_) StaticFieldSlowPath(this,
unresolved_branch, uninit_branch, cont,
- storage_index, r_base));
+ annotation.StorageIndex(), r_base));
FreeTemp(r_tmp);
}
@@ -535,14 +523,14 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
}
// r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
+ LoadBaseDispWide(r_base, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
rl_result.high_reg, INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_offset, rl_result.low_reg);
+ LoadWordDisp(r_base, annotation.FieldOffset().Int32Value(), rl_result.low_reg);
}
FreeTemp(r_base);
if (is_long_or_double) {
@@ -556,7 +544,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
:(is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
: QUICK_ENTRYPOINT_OFFSET(pGet32Static));
- CallRuntimeHelperImm(getterOffset, field_idx, true);
+ CallRuntimeHelperImm(getterOffset, annotation.FieldIndex(), true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -698,18 +686,15 @@ void Mir2Lir::HandleThrowLaunchPads() {
}
}
-void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
-
- if (fast_path && !SLOW_FIELD_PATH) {
+ const IFieldAnnotation& annotation = mir_graph_->GetIFieldAnnotation(mir);
+ cu_->compiler_driver->ProcessedInstanceField(annotation.FastGet());
+ if (annotation.FastGet() && !SLOW_FIELD_PATH) {
RegLocation rl_result;
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
@@ -717,17 +702,17 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
+ LoadBaseDispWide(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
rl_result.high_reg, rl_obj.s_reg_low);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
} else {
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, annotation.FieldOffset().Int32Value());
rl_result = EvalLoc(rl_dest, reg_class, true);
LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
@@ -736,9 +721,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
+ LoadBaseDisp(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_result.low_reg,
kWord, rl_obj.s_reg_low);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
StoreValue(rl_dest, rl_result);
@@ -748,7 +733,7 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
- CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
+ CallRuntimeHelperImmRegLocation(getterOffset, annotation.FieldIndex(), rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
StoreValueWide(rl_dest, rl_result);
@@ -759,39 +744,37 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
}
}
-void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
- int field_offset;
- bool is_volatile;
-
- bool fast_path = FastInstance(field_idx, true, &field_offset, &is_volatile);
- if (fast_path && !SLOW_FIELD_PATH) {
+ const IFieldAnnotation& annotation = mir_graph_->GetIFieldAnnotation(mir);
+ cu_->compiler_driver->ProcessedInstanceField(annotation.FastPut());
+ if (annotation.FastPut() && !SLOW_FIELD_PATH) {
RegisterClass reg_class = oat_reg_class_by_size(size);
- DCHECK_GE(field_offset, 0);
+ DCHECK_GE(annotation.FieldOffset().Int32Value(), 0);
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
- if (is_volatile) {
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, annotation.FieldOffset().Int32Value());
+ if (annotation.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
- if (is_volatile) {
+ if (annotation.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
- if (is_volatile) {
+ StoreBaseDisp(rl_obj.low_reg, annotation.FieldOffset().Int32Value(), rl_src.low_reg, kWord);
+ if (annotation.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
@@ -803,7 +786,8 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
: (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
: QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
- CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
+ CallRuntimeHelperImmRegLocationRegLocation(setter_offset, annotation.FieldIndex(),
+ rl_obj, rl_src, true);
}
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8c2ed36..00518bd 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -596,72 +596,72 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IGET_OBJECT:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
- GenIGet(vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
- GenIGet(vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
- GenIGet(vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_SHORT:
- GenIGet(vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
- GenIGet(vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
break;
case Instruction::IPUT_WIDE:
- GenIPut(vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
- GenIPut(vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
- GenIPut(vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_CHAR:
- GenIPut(vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_SHORT:
- GenIPut(vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
break;
case Instruction::SGET_OBJECT:
- GenSget(vB, rl_dest, false, true);
+ GenSget(mir, rl_dest, false, true);
break;
case Instruction::SGET:
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT:
- GenSget(vB, rl_dest, false, false);
+ GenSget(mir, rl_dest, false, false);
break;
case Instruction::SGET_WIDE:
- GenSget(vB, rl_dest, true, false);
+ GenSget(mir, rl_dest, true, false);
break;
case Instruction::SPUT_OBJECT:
- GenSput(vB, rl_src[0], false, true);
+ GenSput(mir, rl_src[0], false, true);
break;
case Instruction::SPUT:
@@ -669,11 +669,11 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT:
- GenSput(vB, rl_src[0], false, false);
+ GenSput(mir, rl_src[0], false, false);
break;
case Instruction::SPUT_WIDE:
- GenSput(vB, rl_src[0], true, false);
+ GenSput(mir, rl_src[0], true, false);
break;
case Instruction::INVOKE_STATIC_RANGE:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index c60c394..c36013f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -558,13 +558,13 @@ class Mir2Lir : public Backend {
void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src);
void GenFilledNewArray(CallInfo* info);
- void GenSput(uint32_t field_idx, RegLocation rl_src,
+ void GenSput(MIR* mir, RegLocation rl_src,
bool is_long_or_double, bool is_object);
- void GenSget(uint32_t field_idx, RegLocation rl_dest,
+ void GenSget(MIR* mir, RegLocation rl_dest,
bool is_long_or_double, bool is_object);
- void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
- void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
+ void GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
RegLocation rl_src);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 5adb792..c7bf952 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -881,6 +881,24 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
}
}
+void CompilerDriver::ProcessedInstanceField(bool resolved) {
+ if (!resolved) {
+ stats_->UnresolvedInstanceField();
+ } else {
+ stats_->ResolvedInstanceField();
+ }
+}
+
+void CompilerDriver::ProcessedStaticField(bool resolved, bool local) {
+ if (!resolved) {
+ stats_->UnresolvedStaticField();
+ } else if (local) {
+ stats_->ResolvedLocalStaticField();
+ } else {
+ stats_->ResolvedStaticField();
+ }
+}
+
static mirror::Class* ComputeCompilingMethodsClass(ScopedObjectAccess& soa,
SirtRef<mirror::DexCache>& dex_cache,
const DexCompilationUnit* mUnit)
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 092fe52..5c5382b 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -197,6 +197,9 @@ class CompilerDriver {
bool* is_type_initialized, bool* use_direct_type_ptr,
uintptr_t* direct_type_ptr);
+ void ProcessedInstanceField(bool resolved);
+ void ProcessedStaticField(bool resolved, bool local);
+
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,