Diffstat (limited to 'runtime/verifier')
-rw-r--r--  runtime/verifier/dex_gc_map.cc            |   56
-rw-r--r--  runtime/verifier/dex_gc_map.h             |  122
-rw-r--r--  runtime/verifier/instruction_flags.cc     |   40
-rw-r--r--  runtime/verifier/instruction_flags.h      |  116
-rw-r--r--  runtime/verifier/method_verifier.cc       | 4126
-rw-r--r--  runtime/verifier/method_verifier.h        |  726
-rw-r--r--  runtime/verifier/method_verifier_test.cc  |   59
-rw-r--r--  runtime/verifier/reg_type.cc              | 1020
-rw-r--r--  runtime/verifier/reg_type.h               |  925
-rw-r--r--  runtime/verifier/reg_type_cache-inl.h     |   46
-rw-r--r--  runtime/verifier/reg_type_cache.cc        |  523
-rw-r--r--  runtime/verifier/reg_type_cache.h         |  166
-rw-r--r--  runtime/verifier/reg_type_test.cc         |  490
-rw-r--r--  runtime/verifier/register_line-inl.h      |   35
-rw-r--r--  runtime/verifier/register_line.cc         |  495
-rw-r--r--  runtime/verifier/register_line.h          |  358
16 files changed, 9303 insertions, 0 deletions
diff --git a/runtime/verifier/dex_gc_map.cc b/runtime/verifier/dex_gc_map.cc
new file mode 100644
index 0000000..cd0b137
--- /dev/null
+++ b/runtime/verifier/dex_gc_map.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verifier/dex_gc_map.h"
+
+#include "base/logging.h"
+
+namespace art {
+namespace verifier {
+
+const uint8_t* DexPcToReferenceMap::FindBitMap(uint16_t dex_pc, bool error_if_not_present) const {
+ size_t num_entries = NumEntries();
+ // Do linear or binary search?
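+ // The binary search below relies on the entries being sorted by ascending dex pc.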
+ static const size_t kSearchThreshold = 8;
+ if (num_entries < kSearchThreshold) {
+ for (size_t i = 0; i < num_entries; i++) {
+ if (GetDexPc(i) == dex_pc) {
+ return GetBitMap(i);
+ }
+ }
+ } else {
+ int lo = 0;
+ int hi = num_entries - 1;
+ while (hi >= lo) {
+ int mid = (hi + lo) / 2;
+ int mid_pc = GetDexPc(mid);
+ if (dex_pc > mid_pc) {
+ lo = mid + 1;
+ } else if (dex_pc < mid_pc) {
+ hi = mid - 1;
+ } else {
+ return GetBitMap(mid);
+ }
+ }
+ }
+ if (error_if_not_present) {
+ LOG(ERROR) << "Didn't find reference bit map for dex_pc " << dex_pc;
+ }
+ return NULL;
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
new file mode 100644
index 0000000..673112b
--- /dev/null
+++ b/runtime/verifier/dex_gc_map.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_DEX_GC_MAP_H_
+#define ART_SRC_VERIFIER_DEX_GC_MAP_H_
+
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+
+namespace art {
+namespace verifier {
+
+/*
+ * Format enumeration for RegisterMap data area.
+ */
+enum RegisterMapFormat {
+ kRegMapFormatUnknown = 0,
+ kRegMapFormatNone = 1, // Indicates no map data follows.
+ kRegMapFormatCompact8 = 2, // Compact layout, 8-bit addresses.
+ kRegMapFormatCompact16 = 3, // Compact layout, 16-bit addresses.
+};
+
+// Lightweight wrapper for Dex PC to reference bit maps.
+class DexPcToReferenceMap {
+ public:
+ DexPcToReferenceMap(const uint8_t* data, size_t data_length) : data_(data) {
+ CHECK(data_ != NULL);
+ // Check the size of the table agrees with the number of entries
+ size_t data_size = data_length - 4;
+ DCHECK_EQ(EntryWidth() * NumEntries(), data_size);
+ }
+
+ // The number of entries in the table
+ size_t NumEntries() const {
+ return GetData()[2] | (GetData()[3] << 8);
+ }
+
+ // Get the Dex PC at the given index
+ uint16_t GetDexPc(size_t index) const {
+ size_t entry_offset = index * EntryWidth();
+ if (DexPcWidth() == 1) {
+ return Table()[entry_offset];
+ } else {
+ return Table()[entry_offset] | (Table()[entry_offset + 1] << 8);
+ }
+ }
+
+ // Return address of bitmap encoding what are live references
+ const uint8_t* GetBitMap(size_t index) const {
+ size_t entry_offset = index * EntryWidth();
+ return &Table()[entry_offset + DexPcWidth()];
+ }
+
+ // Find the bitmap associated with the given dex pc
+ const uint8_t* FindBitMap(uint16_t dex_pc, bool error_if_not_present = true) const;
+
+ // The number of bytes used to encode registers
+ size_t RegWidth() const {
+ return GetData()[1] | ((GetData()[0] & ~kRegMapFormatMask) << kRegMapFormatShift);
+ }
+
+ private:
+ // Table of num_entries * (dex pc, bitmap)
+ const uint8_t* Table() const {
+ return GetData() + 4;
+ }
+
+ // The format used to encode the dex PCs in the table
+ RegisterMapFormat Format() const {
+ return static_cast<RegisterMapFormat>(GetData()[0] & kRegMapFormatMask);
+ }
+
+ // Number of bytes used to encode a dex pc
+ size_t DexPcWidth() const {
+ RegisterMapFormat format = Format();
+ switch (format) {
+ case kRegMapFormatCompact8:
+ return 1;
+ case kRegMapFormatCompact16:
+ return 2;
+ default:
+ LOG(FATAL) << "Invalid format " << static_cast<int>(format);
+ return -1;
+ }
+ }
+
+ // The width of an entry in the table
+ size_t EntryWidth() const {
+ return DexPcWidth() + RegWidth();
+ }
+
+ const uint8_t* GetData() const {
+ return data_;
+ }
+
+ friend class MethodVerifier;
+
+ static const int kRegMapFormatShift = 5;
+ static const uint8_t kRegMapFormatMask = 0x7;
+
+ const uint8_t* const data_; // The header and table data
+};
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_DEX_GC_MAP_H_
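
As a minimal sketch of the encoding this header describes — the byte values and
the Example() function below are illustrative only, not part of the change:

    #include <cstdint>

    #include "verifier/dex_gc_map.h"

    // A hand-built kRegMapFormatCompact8 map: 1-byte dex PCs, one bitmap
    // byte per entry (RegWidth() == 1), and two entries.
    static const uint8_t kExampleMap[] = {
        0x02,        // byte 0: format = kRegMapFormatCompact8 (low 3 bits)
        0x01,        // byte 1: register bitmap width, in bytes
        0x02, 0x00,  // bytes 2-3: NumEntries() = 2, little-endian
        0x00, 0x01,  // entry 0: dex pc 0x00, bitmap 0b01 (v0 holds a reference)
        0x05, 0x03,  // entry 1: dex pc 0x05, bitmap 0b11 (v0 and v1 do)
    };

    void Example() {
      art::verifier::DexPcToReferenceMap map(kExampleMap, sizeof(kExampleMap));
      const uint8_t* bits = map.FindBitMap(0x05);  // points at the 0x03 byte
      // (bits[0] & (1 << n)) != 0 would mean register v<n> holds a reference.
    }
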
diff --git a/runtime/verifier/instruction_flags.cc b/runtime/verifier/instruction_flags.cc
new file mode 100644
index 0000000..358791d
--- /dev/null
+++ b/runtime/verifier/instruction_flags.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_flags.h"
+
+#include <string.h>
+
+namespace art {
+namespace verifier {
+
+std::string InstructionFlags::ToString() const {
+ char encoding[6];
+ if (!IsOpcode()) {
+ strncpy(encoding, "XXXXX", sizeof(encoding));
+ } else {
+ strncpy(encoding, "-----", sizeof(encoding));
+ if (IsInTry()) encoding[kInTry] = 'T';
+ if (IsBranchTarget()) encoding[kBranchTarget] = 'B';
+ if (IsCompileTimeInfoPoint()) encoding[kCompileTimeInfoPoint] = 'G';
+ if (IsVisited()) encoding[kVisited] = 'V';
+ if (IsChanged()) encoding[kChanged] = 'C';
+ }
+ return encoding;
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
new file mode 100644
index 0000000..9dc3ea7
--- /dev/null
+++ b/runtime/verifier/instruction_flags.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_METHOD_INSTRUCTION_FLAGS_H_
+#define ART_SRC_VERIFIER_METHOD_INSTRUCTION_FLAGS_H_
+
+#include "base/logging.h"
+
+#include <stdint.h>
+#include <string>
+
+namespace art {
+namespace verifier {
+
+class InstructionFlags {
+ public:
+ InstructionFlags() : length_(0), flags_(0) {}
+
+ void SetLengthInCodeUnits(size_t length) {
+ DCHECK_LT(length, 65536u);
+ length_ = length;
+ }
+ size_t GetLengthInCodeUnits() {
+ return length_;
+ }
+ bool IsOpcode() const {
+ return length_ != 0;
+ }
+
+ void SetInTry() {
+ flags_ |= 1 << kInTry;
+ }
+ void ClearInTry() {
+ flags_ &= ~(1 << kInTry);
+ }
+ bool IsInTry() const {
+ return (flags_ & (1 << kInTry)) != 0;
+ }
+
+ void SetBranchTarget() {
+ flags_ |= 1 << kBranchTarget;
+ }
+ void ClearBranchTarget() {
+ flags_ &= ~(1 << kBranchTarget);
+ }
+ bool IsBranchTarget() const {
+ return (flags_ & (1 << kBranchTarget)) != 0;
+ }
+ void SetCompileTimeInfoPoint() {
+ flags_ |= 1 << kCompileTimeInfoPoint;
+ }
+ void ClearCompileTimeInfoPoint() {
+ flags_ &= ~(1 << kCompileTimeInfoPoint);
+ }
+ bool IsCompileTimeInfoPoint() const {
+ return (flags_ & (1 << kCompileTimeInfoPoint)) != 0;
+ }
+
+ void SetVisited() {
+ flags_ |= 1 << kVisited;
+ }
+ void ClearVisited() {
+ flags_ &= ~(1 << kVisited);
+ }
+ bool IsVisited() const {
+ return (flags_ & (1 << kVisited)) != 0;
+ }
+
+ void SetChanged() {
+ flags_ |= 1 << kChanged;
+ }
+ void ClearChanged() {
+ flags_ &= ~(1 << kChanged);
+ }
+ bool IsChanged() const {
+ return (flags_ & (1 << kChanged)) != 0;
+ }
+
+ bool IsVisitedOrChanged() const {
+ return IsVisited() || IsChanged();
+ }
+
+ std::string ToString() const;
+
+ private:
+ enum {
+ kInTry,
+ kBranchTarget,
+ kCompileTimeInfoPoint, // Location of interest to the compiler for GC maps and
+ // verifier based method sharpening.
+ kVisited,
+ kChanged,
+ };
+
+ // Size of instruction in code units.
+ uint16_t length_;
+ uint8_t flags_;
+};
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_METHOD_INSTRUCTION_FLAGS_H_
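
A short usage sketch (Example() is illustrative, not part of the change): each
flag corresponds to one character position in ToString(), in the order the
private enum declares them.

    #include "instruction_flags.h"

    void Example() {
      art::verifier::InstructionFlags flags;
      flags.SetLengthInCodeUnits(3);  // non-zero length makes IsOpcode() true
      flags.SetInTry();
      flags.SetBranchTarget();
      flags.SetVisited();
      // ToString() now yields "TB-V-": 'T' (in try), 'B' (branch target),
      // '-' (no compile-time info point), 'V' (visited), '-' (not changed).
    }
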
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
new file mode 100644
index 0000000..5a70f2a
--- /dev/null
+++ b/runtime/verifier/method_verifier.cc
@@ -0,0 +1,4126 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method_verifier.h"
+
+#include <iostream>
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "base/stringpiece.h"
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "dex_instruction_visitor.h"
+#include "gc/accounting/card_table-inl.h"
+#include "indenter.h"
+#include "intern_table.h"
+#include "leb128.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "register_line-inl.h"
+#include "runtime.h"
+#include "verifier/dex_gc_map.h"
+
+namespace art {
+namespace verifier {
+
+static const bool gDebugVerify = false;
+
+void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
+ uint32_t insns_size, uint16_t registers_size,
+ MethodVerifier* verifier) {
+ DCHECK_GT(insns_size, 0U);
+
+ for (uint32_t i = 0; i < insns_size; i++) {
+ bool interesting = false;
+ switch (mode) {
+ case kTrackRegsAll:
+ interesting = flags[i].IsOpcode();
+ break;
+ case kTrackCompilerInterestPoints:
+ interesting = flags[i].IsCompileTimeInfoPoint() || flags[i].IsBranchTarget();
+ break;
+ case kTrackRegsBranches:
+ interesting = flags[i].IsBranchTarget();
+ break;
+ default:
+ break;
+ }
+ if (interesting) {
+ pc_to_register_line_.Put(i, new RegisterLine(registers_size, verifier));
+ }
+ }
+}
+
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(const mirror::Class* klass,
+ std::string& error,
+ bool allow_soft_failures) {
+ if (klass->IsVerified()) {
+ return kNoFailure;
+ }
+ mirror::Class* super = klass->GetSuperClass();
+ if (super == NULL && StringPiece(ClassHelper(klass).GetDescriptor()) != "Ljava/lang/Object;") {
+ error = "Verifier rejected class ";
+ error += PrettyDescriptor(klass);
+ error += " that has no super class";
+ return kHardFailure;
+ }
+ if (super != NULL && super->IsFinal()) {
+ error = "Verifier rejected class ";
+ error += PrettyDescriptor(klass);
+ error += " that attempts to sub-class final class ";
+ error += PrettyDescriptor(super);
+ return kHardFailure;
+ }
+ ClassHelper kh(klass);
+ const DexFile& dex_file = kh.GetDexFile();
+ uint32_t class_def_idx;
+ if (!dex_file.FindClassDefIndex(kh.GetDescriptor(), class_def_idx)) {
+ error = "Verifier rejected class ";
+ error += PrettyDescriptor(klass);
+ error += " that isn't present in dex file ";
+ error += dex_file.GetLocation();
+ return kHardFailure;
+ }
+ return VerifyClass(&dex_file, kh.GetDexCache(), klass->GetClassLoader(), class_def_idx, error, allow_soft_failures);
+}
+
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ uint32_t class_def_idx,
+ std::string& error,
+ bool allow_soft_failures) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ const byte* class_data = dex_file->GetClassData(class_def);
+ if (class_data == NULL) {
+ // empty class, probably a marker interface
+ return kNoFailure;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField() || it.HasNextInstanceField()) {
+ it.Next();
+ }
+ size_t error_count = 0;
+ bool hard_fail = false;
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ int64_t previous_direct_method_idx = -1;
+ while (it.HasNextDirectMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_direct_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_direct_method_idx = method_idx;
+ InvokeType type = it.GetMethodInvokeType(class_def);
+ mirror::AbstractMethod* method =
+ linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type);
+ if (method == NULL) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ // We couldn't resolve the method, but continue regardless.
+ Thread::Current()->ClearException();
+ }
+ MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader,
+ class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures);
+ if (result != kNoFailure) {
+ if (result == kHardFailure) {
+ hard_fail = true;
+ if (error_count > 0) {
+ error += "\n";
+ }
+ error = "Verifier rejected class ";
+ error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
+ error += " due to bad method ";
+ error += PrettyMethod(method_idx, *dex_file);
+ }
+ ++error_count;
+ }
+ it.Next();
+ }
+ int64_t previous_virtual_method_idx = -1;
+ while (it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ if (method_idx == previous_virtual_method_idx) {
+ // smali can create dex files with two encoded_methods sharing the same method_idx
+ // http://code.google.com/p/smali/issues/detail?id=119
+ it.Next();
+ continue;
+ }
+ previous_virtual_method_idx = method_idx;
+ InvokeType type = it.GetMethodInvokeType(class_def);
+ mirror::AbstractMethod* method =
+ linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader, NULL, type);
+ if (method == NULL) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ // We couldn't resolve the method, but continue regardless.
+ Thread::Current()->ClearException();
+ }
+ MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader,
+ class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures);
+ if (result != kNoFailure) {
+ if (result == kHardFailure) {
+ hard_fail = true;
+ if (error_count > 0) {
+ error += "\n";
+ }
+ error = "Verifier rejected class ";
+ error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
+ error += " due to bad method ";
+ error += PrettyMethod(method_idx, *dex_file);
+ }
+ ++error_count;
+ }
+ it.Next();
+ }
+ if (error_count == 0) {
+ return kNoFailure;
+ } else {
+ return hard_fail ? kHardFailure : kSoftFailure;
+ }
+}
+
+MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
+ const DexFile* dex_file,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader,
+ uint32_t class_def_idx,
+ const DexFile::CodeItem* code_item,
+ mirror::AbstractMethod* method,
+ uint32_t method_access_flags,
+ bool allow_soft_failures) {
+ MethodVerifier::FailureKind result = kNoFailure;
+ uint64_t start_ns = NanoTime();
+
+ MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item, method_idx,
+ method, method_access_flags, true, allow_soft_failures);
+ if (verifier.Verify()) {
+ // Verification completed, however failures may be pending that didn't cause the verification
+ // to hard fail.
+ CHECK(!verifier.have_pending_hard_failure_);
+ if (verifier.failures_.size() != 0) {
+ verifier.DumpFailures(LOG(INFO) << "Soft verification failures in "
+ << PrettyMethod(method_idx, *dex_file) << "\n");
+ result = kSoftFailure;
+ }
+ } else {
+ // Bad method data.
+ CHECK_NE(verifier.failures_.size(), 0U);
+ CHECK(verifier.have_pending_hard_failure_);
+ verifier.DumpFailures(LOG(INFO) << "Verification error in "
+ << PrettyMethod(method_idx, *dex_file) << "\n");
+ if (gDebugVerify) {
+ std::cout << "\n" << verifier.info_messages_.str();
+ verifier.Dump(std::cout);
+ }
+ result = kHardFailure;
+ }
+ uint64_t duration_ns = NanoTime() - start_ns;
+ if (duration_ns > MsToNs(100)) {
+ LOG(WARNING) << "Verification of " << PrettyMethod(method_idx, *dex_file)
+ << " took " << PrettyDuration(duration_ns);
+ }
+ return result;
+}
+
+void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
+ const DexFile* dex_file, mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader, uint32_t class_def_idx,
+ const DexFile::CodeItem* code_item,
+ mirror::AbstractMethod* method,
+ uint32_t method_access_flags) {
+ MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def_idx, code_item,
+ dex_method_idx, method, method_access_flags, true, true);
+ verifier.Verify();
+ verifier.DumpFailures(os);
+ os << verifier.info_messages_.str();
+ verifier.Dump(os);
+}
+
+MethodVerifier::MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader, uint32_t class_def_idx,
+ const DexFile::CodeItem* code_item,
+ uint32_t dex_method_idx, mirror::AbstractMethod* method,
+ uint32_t method_access_flags, bool can_load_classes,
+ bool allow_soft_failures)
+ : reg_types_(can_load_classes),
+ work_insn_idx_(-1),
+ dex_method_idx_(dex_method_idx),
+ mirror_method_(method),
+ method_access_flags_(method_access_flags),
+ dex_file_(dex_file),
+ dex_cache_(dex_cache),
+ class_loader_(class_loader),
+ class_def_idx_(class_def_idx),
+ code_item_(code_item),
+ declaring_class_(NULL),
+ interesting_dex_pc_(-1),
+ monitor_enter_dex_pcs_(NULL),
+ have_pending_hard_failure_(false),
+ have_pending_runtime_throw_failure_(false),
+ new_instance_count_(0),
+ monitor_enter_count_(0),
+ can_load_classes_(can_load_classes),
+ allow_soft_failures_(allow_soft_failures) {
+}
+
+void MethodVerifier::FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc,
+ std::vector<uint32_t>& monitor_enter_dex_pcs) {
+ MethodHelper mh(m);
+ MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
+ mh.GetClassDefIndex(), mh.GetCodeItem(), m->GetDexMethodIndex(),
+ m, m->GetAccessFlags(), false, true);
+ verifier.interesting_dex_pc_ = dex_pc;
+ verifier.monitor_enter_dex_pcs_ = &monitor_enter_dex_pcs;
+ verifier.FindLocksAtDexPc();
+}
+
+void MethodVerifier::FindLocksAtDexPc() {
+ CHECK(monitor_enter_dex_pcs_ != NULL);
+ CHECK(code_item_ != NULL); // This only makes sense for methods with code.
+
+ // Strictly speaking, we ought to be able to get away with doing a subset of the full method
+ // verification. In practice, the phase we want relies on data structures set up by all the
+ // earlier passes, so we just run the full method verification and bail out early when we've
+ // got what we wanted.
+ Verify();
+}
+
+mirror::Field* MethodVerifier::FindAccessedFieldAtDexPc(mirror::AbstractMethod* m,
+ uint32_t dex_pc) {
+ MethodHelper mh(m);
+ MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
+ mh.GetClassDefIndex(), mh.GetCodeItem(), m->GetDexMethodIndex(),
+ m, m->GetAccessFlags(), false, true);
+ return verifier.FindAccessedFieldAtDexPc(dex_pc);
+}
+
+mirror::Field* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
+ CHECK(code_item_ != NULL); // This only makes sense for methods with code.
+
+ // Strictly speaking, we ought to be able to get away with doing a subset of the full method
+ // verification. In practice, the phase we want relies on data structures set up by all the
+ // earlier passes, so we just run the full method verification and bail out early when we've
+ // got what we wanted.
+ bool success = Verify();
+ if (!success) {
+ return NULL;
+ }
+ RegisterLine* register_line = reg_table_.GetLine(dex_pc);
+ if (register_line == NULL) {
+ return NULL;
+ }
+ const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
+ return GetQuickFieldAccess(inst, register_line);
+}
+
+mirror::AbstractMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::AbstractMethod* m,
+ uint32_t dex_pc) {
+ MethodHelper mh(m);
+ MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
+ mh.GetClassDefIndex(), mh.GetCodeItem(), m->GetDexMethodIndex(),
+ m, m->GetAccessFlags(), false, true);
+ return verifier.FindInvokedMethodAtDexPc(dex_pc);
+}
+
+mirror::AbstractMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
+ CHECK(code_item_ != NULL); // This only makes sense for methods with code.
+
+ // Strictly speaking, we ought to be able to get away with doing a subset of the full method
+ // verification. In practice, the phase we want relies on data structures set up by all the
+ // earlier passes, so we just run the full method verification and bail out early when we've
+ // got what we wanted.
+ bool success = Verify();
+ if (!success) {
+ return NULL;
+ }
+ RegisterLine* register_line = reg_table_.GetLine(dex_pc);
+ if (register_line == NULL) {
+ return NULL;
+ }
+ const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
+ const bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
+ return GetQuickInvokedMethod(inst, register_line, is_range);
+}
+
+bool MethodVerifier::Verify() {
+ // If there aren't any instructions, make sure that's expected, then exit successfully.
+ if (code_item_ == NULL) {
+ if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
+ return false;
+ } else {
+ return true;
+ }
+ }
+ // Sanity-check the register counts. ins + locals = registers, so make sure that ins <= registers.
+ if (code_item_->ins_size_ > code_item_->registers_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad register counts (ins=" << code_item_->ins_size_
+ << " regs=" << code_item_->registers_size_;
+ return false;
+ }
+ // Allocate and initialize an array to hold instruction data.
+ insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+ // Run through the instructions and see if the width checks out.
+ bool result = ComputeWidthsAndCountOps();
+ // Flag instructions guarded by a "try" block and check exception handlers.
+ result = result && ScanTryCatchBlocks();
+ // Perform static instruction verification.
+ result = result && VerifyInstructions();
+ // Perform code-flow analysis and return.
+ return result && VerifyCodeFlow();
+}
+
+std::ostream& MethodVerifier::Fail(VerifyError error) {
+ switch (error) {
+ case VERIFY_ERROR_NO_CLASS:
+ case VERIFY_ERROR_NO_FIELD:
+ case VERIFY_ERROR_NO_METHOD:
+ case VERIFY_ERROR_ACCESS_CLASS:
+ case VERIFY_ERROR_ACCESS_FIELD:
+ case VERIFY_ERROR_ACCESS_METHOD:
+ case VERIFY_ERROR_INSTANTIATION:
+ case VERIFY_ERROR_CLASS_CHANGE:
+ if (Runtime::Current()->IsCompiler() || !can_load_classes_) {
+ // If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
+ // class change and instantiation errors into soft verification errors so that we re-verify
+ // at runtime. We may fail to find or to agree on access because of not yet available class
+ // loaders, or class loaders that will differ at runtime. In these cases, we don't want to
+ // affect the soundness of the code being compiled. Instead, the generated code runs "slow
+ // paths" that dynamically perform the verification and cause the behavior to be that akin
+ // to an interpreter.
+ error = VERIFY_ERROR_BAD_CLASS_SOFT;
+ } else {
+ have_pending_runtime_throw_failure_ = true;
+ }
+ break;
+ // Indication that verification should be retried at runtime.
+ case VERIFY_ERROR_BAD_CLASS_SOFT:
+ if (!allow_soft_failures_) {
+ have_pending_hard_failure_ = true;
+ }
+ break;
+ // Hard verification failures at compile time will still fail at runtime, so the class is
+ // marked as rejected to prevent it from being compiled.
+ case VERIFY_ERROR_BAD_CLASS_HARD: {
+ if (Runtime::Current()->IsCompiler()) {
+ ClassReference ref(dex_file_, class_def_idx_);
+ AddRejectedClass(ref);
+ }
+ have_pending_hard_failure_ = true;
+ break;
+ }
+ }
+ failures_.push_back(error);
+ std::string location(StringPrintf("%s: [0x%X]", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
+ work_insn_idx_));
+ std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
+ failure_messages_.push_back(failure_message);
+ return *failure_message;
+}
+
+void MethodVerifier::PrependToLastFailMessage(std::string prepend) {
+ size_t failure_num = failure_messages_.size();
+ DCHECK_NE(failure_num, 0U);
+ std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
+ prepend += last_fail_message->str();
+ failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
+ delete last_fail_message;
+}
+
+void MethodVerifier::AppendToLastFailMessage(std::string append) {
+ size_t failure_num = failure_messages_.size();
+ DCHECK_NE(failure_num, 0U);
+ std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
+ (*last_fail_message) << append;
+}
+
+bool MethodVerifier::ComputeWidthsAndCountOps() {
+ const uint16_t* insns = code_item_->insns_;
+ size_t insns_size = code_item_->insns_size_in_code_units_;
+ const Instruction* inst = Instruction::At(insns);
+ size_t new_instance_count = 0;
+ size_t monitor_enter_count = 0;
+ size_t dex_pc = 0;
+
+ while (dex_pc < insns_size) {
+ Instruction::Code opcode = inst->Opcode();
+ if (opcode == Instruction::NEW_INSTANCE) {
+ new_instance_count++;
+ } else if (opcode == Instruction::MONITOR_ENTER) {
+ monitor_enter_count++;
+ }
+ size_t inst_size = inst->SizeInCodeUnits();
+ insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size);
+ dex_pc += inst_size;
+ inst = inst->Next();
+ }
+
+ if (dex_pc != insns_size) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "code did not end where expected ("
+ << dex_pc << " vs. " << insns_size << ")";
+ return false;
+ }
+
+ new_instance_count_ = new_instance_count;
+ monitor_enter_count_ = monitor_enter_count;
+ return true;
+}
+
+bool MethodVerifier::ScanTryCatchBlocks() {
+ uint32_t tries_size = code_item_->tries_size_;
+ if (tries_size == 0) {
+ return true;
+ }
+ uint32_t insns_size = code_item_->insns_size_in_code_units_;
+ const DexFile::TryItem* tries = DexFile::GetTryItems(*code_item_, 0);
+
+ for (uint32_t idx = 0; idx < tries_size; idx++) {
+ const DexFile::TryItem* try_item = &tries[idx];
+ uint32_t start = try_item->start_addr_;
+ uint32_t end = start + try_item->insn_count_;
+ if ((start >= end) || (start >= insns_size) || (end > insns_size)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad exception entry: startAddr=" << start
+ << " endAddr=" << end << " (size=" << insns_size << ")";
+ return false;
+ }
+ if (!insn_flags_[start].IsOpcode()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'try' block starts inside an instruction (" << start << ")";
+ return false;
+ }
+ for (uint32_t dex_pc = start; dex_pc < end;
+ dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
+ insn_flags_[dex_pc].SetInTry();
+ }
+ }
+ // Iterate over each of the handlers to verify target addresses.
+ const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ for (uint32_t idx = 0; idx < handlers_size; idx++) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ uint32_t dex_pc = iterator.GetHandlerAddress();
+ if (!insn_flags_[dex_pc].IsOpcode()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "exception handler starts at bad address (" << dex_pc << ")";
+ return false;
+ }
+ const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
+ if (inst->Opcode() != Instruction::MOVE_EXCEPTION) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "exception handler doesn't start with move-exception ("
+ << dex_pc << ")";
+ return false;
+ }
+ insn_flags_[dex_pc].SetBranchTarget();
+ // Ensure exception types are resolved so that they don't need resolution to be delivered;
+ // unresolved exception types will be ignored by exception delivery.
+ if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
+ mirror::Class* exception_type = linker->ResolveType(*dex_file_,
+ iterator.GetHandlerTypeIndex(),
+ dex_cache_, class_loader_);
+ if (exception_type == NULL) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ }
+ }
+ }
+ handlers_ptr = iterator.EndDataPointer();
+ }
+ return true;
+}
+
+bool MethodVerifier::VerifyInstructions() {
+ const Instruction* inst = Instruction::At(code_item_->insns_);
+
+ /* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
+ insn_flags_[0].SetBranchTarget();
+ insn_flags_[0].SetCompileTimeInfoPoint();
+
+ uint32_t insns_size = code_item_->insns_size_in_code_units_;
+ for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
+ if (!VerifyInstruction(inst, dex_pc)) {
+ DCHECK_NE(failures_.size(), 0U);
+ return false;
+ }
+ /* Flag instructions that are garbage collection points */
+ // All invoke points are marked as "Throw" points already.
+ // We are relying on this to also count all the invokes as interesting.
+ if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow() || inst->IsReturn()) {
+ insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ }
+ dex_pc += inst->SizeInCodeUnits();
+ inst = inst->Next();
+ }
+ return true;
+}
+
+bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_offset) {
+ DecodedInstruction dec_insn(inst);
+ bool result = true;
+ switch (inst->GetVerifyTypeArgumentA()) {
+ case Instruction::kVerifyRegA:
+ result = result && CheckRegisterIndex(dec_insn.vA);
+ break;
+ case Instruction::kVerifyRegAWide:
+ result = result && CheckWideRegisterIndex(dec_insn.vA);
+ break;
+ }
+ switch (inst->GetVerifyTypeArgumentB()) {
+ case Instruction::kVerifyRegB:
+ result = result && CheckRegisterIndex(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBField:
+ result = result && CheckFieldIndex(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBMethod:
+ result = result && CheckMethodIndex(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBNewInstance:
+ result = result && CheckNewInstance(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBString:
+ result = result && CheckStringIndex(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBType:
+ result = result && CheckTypeIndex(dec_insn.vB);
+ break;
+ case Instruction::kVerifyRegBWide:
+ result = result && CheckWideRegisterIndex(dec_insn.vB);
+ break;
+ }
+ switch (inst->GetVerifyTypeArgumentC()) {
+ case Instruction::kVerifyRegC:
+ result = result && CheckRegisterIndex(dec_insn.vC);
+ break;
+ case Instruction::kVerifyRegCField:
+ result = result && CheckFieldIndex(dec_insn.vC);
+ break;
+ case Instruction::kVerifyRegCNewArray:
+ result = result && CheckNewArray(dec_insn.vC);
+ break;
+ case Instruction::kVerifyRegCType:
+ result = result && CheckTypeIndex(dec_insn.vC);
+ break;
+ case Instruction::kVerifyRegCWide:
+ result = result && CheckWideRegisterIndex(dec_insn.vC);
+ break;
+ }
+ switch (inst->GetVerifyExtraFlags()) {
+ case Instruction::kVerifyArrayData:
+ result = result && CheckArrayData(code_offset);
+ break;
+ case Instruction::kVerifyBranchTarget:
+ result = result && CheckBranchTarget(code_offset);
+ break;
+ case Instruction::kVerifySwitchTargets:
+ result = result && CheckSwitchTargets(code_offset);
+ break;
+ case Instruction::kVerifyVarArg:
+ result = result && CheckVarArgRegs(dec_insn.vA, dec_insn.arg);
+ break;
+ case Instruction::kVerifyVarArgRange:
+ result = result && CheckVarArgRangeRegs(dec_insn.vA, dec_insn.vC);
+ break;
+ case Instruction::kVerifyError:
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected opcode " << inst->Name();
+ result = false;
+ break;
+ }
+ return result;
+}
+
+bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
+ if (idx >= code_item_->registers_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
+ << code_item_->registers_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
+ if (idx + 1 >= code_item_->registers_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
+ << "+1 >= " << code_item_->registers_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().field_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
+ << dex_file_->GetHeader().field_ids_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().method_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
+ << dex_file_->GetHeader().method_ids_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckNewInstance(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().type_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
+ << dex_file_->GetHeader().type_ids_size_ << ")";
+ return false;
+ }
+ // We don't need the actual class, just a pointer to the class name.
+ const char* descriptor = dex_file_->StringByTypeIdx(idx);
+ if (descriptor[0] != 'L') {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't call new-instance on type '" << descriptor << "'";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckStringIndex(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().string_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
+ << dex_file_->GetHeader().string_ids_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckTypeIndex(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().type_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
+ << dex_file_->GetHeader().type_ids_size_ << ")";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckNewArray(uint32_t idx) {
+ if (idx >= dex_file_->GetHeader().type_ids_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
+ << dex_file_->GetHeader().type_ids_size_ << ")";
+ return false;
+ }
+ int bracket_count = 0;
+ const char* descriptor = dex_file_->StringByTypeIdx(idx);
+ const char* cp = descriptor;
+ while (*cp++ == '[') {
+ bracket_count++;
+ }
+ if (bracket_count == 0) {
+ /* The given class must be an array type. */
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (not an array)";
+ return false;
+ } else if (bracket_count > 255) {
+ /* It is illegal to create an array of more than 255 dimensions. */
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (exceeds limit)";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
+ const uint32_t insn_count = code_item_->insns_size_in_code_units_;
+ const uint16_t* insns = code_item_->insns_ + cur_offset;
+ const uint16_t* array_data;
+ int32_t array_data_offset;
+
+ DCHECK_LT(cur_offset, insn_count);
+ /* make sure the start of the array data table is in range */
+ array_data_offset = insns[1] | (((int32_t) insns[2]) << 16);
+ if ((int32_t) cur_offset + array_data_offset < 0 ||
+ cur_offset + array_data_offset + 2 >= insn_count) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
+ << ", data offset " << array_data_offset << ", count " << insn_count;
+ return false;
+ }
+ /* offset to array data table is a relative branch-style offset */
+ array_data = insns + array_data_offset;
+ /* make sure the table is 32-bit aligned */
+ if ((((uint32_t) array_data) & 0x03) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned array data table: at " << cur_offset
+ << ", data offset " << array_data_offset;
+ return false;
+ }
+ uint32_t value_width = array_data[1];
+ uint32_t value_count = *reinterpret_cast<const uint32_t*>(&array_data[2]);
+ uint32_t table_size = 4 + (value_width * value_count + 1) / 2;
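+ /* e.g. value_width 1 with value_count 5: table_size = 4 + (5 * 1 + 1) / 2 = 7 code units */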
+ /* make sure the end of the switch is in range */
+ if (cur_offset + array_data_offset + table_size > insn_count) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data end: at " << cur_offset
+ << ", data offset " << array_data_offset << ", end "
+ << cur_offset + array_data_offset + table_size
+ << ", count " << insn_count;
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckBranchTarget(uint32_t cur_offset) {
+ int32_t offset;
+ bool isConditional, selfOkay;
+ if (!GetBranchOffset(cur_offset, &offset, &isConditional, &selfOkay)) {
+ return false;
+ }
+ if (!selfOkay && offset == 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at" << reinterpret_cast<void*>(cur_offset);
+ return false;
+ }
+ // Check for 32-bit overflow. This isn't strictly necessary if we can depend on the runtime
+ // to have identical "wrap-around" behavior, but it's unwise to depend on that.
+ if (((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow " << reinterpret_cast<void*>(cur_offset) << " +" << offset;
+ return false;
+ }
+ const uint32_t insn_count = code_item_->insns_size_in_code_units_;
+ int32_t abs_offset = cur_offset + offset;
+ if (abs_offset < 0 || (uint32_t) abs_offset >= insn_count || !insn_flags_[abs_offset].IsOpcode()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
+ << reinterpret_cast<void*>(abs_offset) << ") at "
+ << reinterpret_cast<void*>(cur_offset);
+ return false;
+ }
+ insn_flags_[abs_offset].SetBranchTarget();
+ return true;
+}
+
+bool MethodVerifier::GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool* pConditional,
+ bool* selfOkay) {
+ const uint16_t* insns = code_item_->insns_ + cur_offset;
+ *pConditional = false;
+ *selfOkay = false;
+ switch (*insns & 0xff) {
+ case Instruction::GOTO:
+ *pOffset = ((int16_t) *insns) >> 8;
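+ // e.g. the code unit 0xFE28 (AA=0xFE, op=0x28/GOTO) sign-extends to offset -2.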
+ break;
+ case Instruction::GOTO_32:
+ *pOffset = insns[1] | (((uint32_t) insns[2]) << 16);
+ *selfOkay = true;
+ break;
+ case Instruction::GOTO_16:
+ *pOffset = (int16_t) insns[1];
+ break;
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ *pOffset = (int16_t) insns[1];
+ *pConditional = true;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
+ const uint32_t insn_count = code_item_->insns_size_in_code_units_;
+ DCHECK_LT(cur_offset, insn_count);
+ const uint16_t* insns = code_item_->insns_ + cur_offset;
+ /* make sure the start of the switch is in range */
+ int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
+ if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 >= insn_count) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
+ << ", switch offset " << switch_offset << ", count " << insn_count;
+ return false;
+ }
+ /* offset to switch table is a relative branch-style offset */
+ const uint16_t* switch_insns = insns + switch_offset;
+ /* make sure the table is 32-bit aligned */
+ if ((((uint32_t) switch_insns) & 0x03) != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned switch table: at " << cur_offset
+ << ", switch offset " << switch_offset;
+ return false;
+ }
+ uint32_t switch_count = switch_insns[1];
+ int32_t keys_offset, targets_offset;
+ uint16_t expected_signature;
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0=sig, 1=count, 2/3=firstKey */
+ targets_offset = 4;
+ keys_offset = -1;
+ expected_signature = Instruction::kPackedSwitchSignature;
+ } else {
+ /* 0=sig, 1=count, 2..count*2 = keys */
+ keys_offset = 2;
+ targets_offset = 2 + 2 * switch_count;
+ expected_signature = Instruction::kSparseSwitchSignature;
+ }
+ uint32_t table_size = targets_offset + switch_count * 2;
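+ /* e.g. a packed-switch with 3 targets spans 4 + 3 * 2 = 10 code units */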
+ if (switch_insns[0] != expected_signature) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << StringPrintf("wrong signature for switch table (%x, wanted %x)",
+ switch_insns[0], expected_signature);
+ return false;
+ }
+ /* make sure the end of the switch is in range */
+ if (cur_offset + switch_offset + table_size > (uint32_t) insn_count) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset << ", switch offset "
+ << switch_offset << ", end "
+ << (cur_offset + switch_offset + table_size)
+ << ", count " << insn_count;
+ return false;
+ }
+ /* for a sparse switch, verify the keys are in ascending order */
+ if (keys_offset > 0 && switch_count > 1) {
+ int32_t last_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
+ for (uint32_t targ = 1; targ < switch_count; targ++) {
+ int32_t key = (int32_t) switch_insns[keys_offset + targ * 2] |
+ (int32_t) (switch_insns[keys_offset + targ * 2 + 1] << 16);
+ if (key <= last_key) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: last key=" << last_key
+ << ", this=" << key;
+ return false;
+ }
+ last_key = key;
+ }
+ }
+ /* verify each switch target */
+ for (uint32_t targ = 0; targ < switch_count; targ++) {
+ int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] |
+ (int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16);
+ int32_t abs_offset = cur_offset + offset;
+ if (abs_offset < 0 || abs_offset >= (int32_t) insn_count || !insn_flags_[abs_offset].IsOpcode()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset << " (-> "
+ << reinterpret_cast<void*>(abs_offset) << ") at "
+ << reinterpret_cast<void*>(cur_offset) << "[" << targ << "]";
+ return false;
+ }
+ insn_flags_[abs_offset].SetBranchTarget();
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
+ if (vA > 5) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << vA << ") in non-range invoke)";
+ return false;
+ }
+ uint16_t registers_size = code_item_->registers_size_;
+ for (uint32_t idx = 0; idx < vA; idx++) {
+ if (arg[idx] >= registers_size) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index (" << arg[idx]
+ << ") in non-range invoke (>= " << registers_size << ")";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool MethodVerifier::CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) {
+ uint16_t registers_size = code_item_->registers_size_;
+ // vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
+ // integer overflow when adding them here.
+ if (vA + vC > registers_size) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC << " in range invoke (> "
+ << registers_size << ")";
+ return false;
+ }
+ return true;
+}
+
+static const std::vector<uint8_t>* CreateLengthPrefixedDexGcMap(const std::vector<uint8_t>& gc_map) {
+ std::vector<uint8_t>* length_prefixed_gc_map = new std::vector<uint8_t>;
+ length_prefixed_gc_map->reserve(gc_map.size() + 4);
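+ // The four prefix bytes store gc_map.size() big-endian; the DCHECKs below check the round trip.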
+ length_prefixed_gc_map->push_back((gc_map.size() & 0xff000000) >> 24);
+ length_prefixed_gc_map->push_back((gc_map.size() & 0x00ff0000) >> 16);
+ length_prefixed_gc_map->push_back((gc_map.size() & 0x0000ff00) >> 8);
+ length_prefixed_gc_map->push_back((gc_map.size() & 0x000000ff) >> 0);
+ length_prefixed_gc_map->insert(length_prefixed_gc_map->end(),
+ gc_map.begin(),
+ gc_map.end());
+ DCHECK_EQ(gc_map.size() + 4, length_prefixed_gc_map->size());
+ DCHECK_EQ(gc_map.size(),
+ static_cast<size_t>((length_prefixed_gc_map->at(0) << 24) |
+ (length_prefixed_gc_map->at(1) << 16) |
+ (length_prefixed_gc_map->at(2) << 8) |
+ (length_prefixed_gc_map->at(3) << 0)));
+ return length_prefixed_gc_map;
+}
+
+bool MethodVerifier::VerifyCodeFlow() {
+ uint16_t registers_size = code_item_->registers_size_;
+ uint32_t insns_size = code_item_->insns_size_in_code_units_;
+
+ if (registers_size * insns_size > 4 * 1024 * 1024) {
+ LOG(WARNING) << "warning: method is huge (regs=" << registers_size
+ << " insns_size=" << insns_size << ")";
+ }
+ /* Create and initialize table holding register status */
+ reg_table_.Init(kTrackCompilerInterestPoints, insn_flags_.get(), insns_size, registers_size, this);
+
+ work_line_.reset(new RegisterLine(registers_size, this));
+ saved_line_.reset(new RegisterLine(registers_size, this));
+
+ /* Initialize register types of method arguments. */
+ if (!SetTypesFromSignature()) {
+ DCHECK_NE(failures_.size(), 0U);
+ std::string prepend("Bad signature in ");
+ prepend += PrettyMethod(dex_method_idx_, *dex_file_);
+ PrependToLastFailMessage(prepend);
+ return false;
+ }
+ /* Perform code flow verification. */
+ if (!CodeFlowVerifyMethod()) {
+ DCHECK_NE(failures_.size(), 0U);
+ return false;
+ }
+
+ /* Generate a register map and add it to the method. */
+ UniquePtr<const std::vector<uint8_t> > map(GenerateGcMap());
+ if (map.get() == NULL) {
+ DCHECK_NE(failures_.size(), 0U);
+ return false; // Not a real failure, but a failure to encode
+ }
+ if (kIsDebugBuild) {
+ VerifyGcMap(*map);
+ }
+ MethodReference ref(dex_file_, dex_method_idx_);
+ const std::vector<uint8_t>* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get()));
+ verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
+
+ MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
+ if (method_to_safe_casts != NULL) {
+ SetSafeCastMap(ref, method_to_safe_casts);
+ }
+
+ MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
+ if (pc_to_concrete_method != NULL) {
+ SetDevirtMap(ref, pc_to_concrete_method);
+ }
+ return true;
+}
+
+std::ostream& MethodVerifier::DumpFailures(std::ostream& os) {
+ DCHECK_EQ(failures_.size(), failure_messages_.size());
+ for (size_t i = 0; i < failures_.size(); ++i) {
+ os << failure_messages_[i]->str() << "\n";
+ }
+ return os;
+}
+
+extern "C" void MethodVerifierGdbDump(MethodVerifier* v)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ v->Dump(std::cerr);
+}
+
+void MethodVerifier::Dump(std::ostream& os) {
+ if (code_item_ == NULL) {
+ os << "Native method\n";
+ return;
+ }
+ {
+ os << "Register Types:\n";
+ Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
+ std::ostream indent_os(&indent_filter);
+ reg_types_.Dump(indent_os);
+ }
+ os << "Dumping instructions and register lines:\n";
+ Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
+ std::ostream indent_os(&indent_filter);
+ const Instruction* inst = Instruction::At(code_item_->insns_);
+ for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
+ dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
+ RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
+ if (reg_line != NULL) {
+ indent_os << reg_line->Dump() << "\n";
+ }
+ indent_os << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+ const bool kDumpHexOfInstruction = false;
+ if (kDumpHexOfInstruction) {
+ indent_os << inst->DumpHex(5) << " ";
+ }
+ indent_os << inst->DumpString(dex_file_) << "\n";
+ inst = inst->Next();
+ }
+}
+
+static bool IsPrimitiveDescriptor(char descriptor) {
+ switch (descriptor) {
+ case 'I':
+ case 'C':
+ case 'S':
+ case 'B':
+ case 'Z':
+ case 'F':
+ case 'D':
+ case 'J':
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool MethodVerifier::SetTypesFromSignature() {
+ RegisterLine* reg_line = reg_table_.GetLine(0);
+ int arg_start = code_item_->registers_size_ - code_item_->ins_size_;
+ size_t expected_args = code_item_->ins_size_; /* long/double count as two */
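+ // e.g. a virtual method with signature (IJ)V and registers_size=5 has ins_size=4:
+ // v1 = this, v2 = the int argument, v3/v4 = the long argument (lo/hi).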
+
+ DCHECK_GE(arg_start, 0); /* should have been verified earlier */
+ // Include the "this" pointer.
+ size_t cur_arg = 0;
+ if (!IsStatic()) {
+ // If this is a constructor for a class other than java.lang.Object, mark the first ("this")
+ // argument as uninitialized. This restricts field access until the superclass constructor is
+ // called.
+ const RegType& declaring_class = GetDeclaringClass();
+ if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
+ reg_line->SetRegisterType(arg_start + cur_arg,
+ reg_types_.UninitializedThisArgument(declaring_class));
+ } else {
+ reg_line->SetRegisterType(arg_start + cur_arg, declaring_class);
+ }
+ cur_arg++;
+ }
+
+ const DexFile::ProtoId& proto_id =
+ dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
+ DexFileParameterIterator iterator(*dex_file_, proto_id);
+
+ for (; iterator.HasNext(); iterator.Next()) {
+ const char* descriptor = iterator.GetDescriptor();
+ if (descriptor == NULL) {
+ LOG(FATAL) << "Null descriptor";
+ }
+ if (cur_arg >= expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
+ << " args, found more (" << descriptor << ")";
+ return false;
+ }
+ switch (descriptor[0]) {
+ case 'L':
+ case '[':
+ // We assume that reference arguments are initialized. The only way it could be otherwise
+ // (assuming the caller was verified) is if the current method is <init>, but in that case
+ // it's effectively considered initialized the instant we reach here (in the sense that we
+ // can return without doing anything or call virtual methods).
+ {
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_type);
+ }
+ break;
+ case 'Z':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Boolean());
+ break;
+ case 'C':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Char());
+ break;
+ case 'B':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Byte());
+ break;
+ case 'I':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Integer());
+ break;
+ case 'S':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Short());
+ break;
+ case 'F':
+ reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Float());
+ break;
+ case 'J':
+ case 'D': {
+ const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+ const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+ reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
+ cur_arg++;
+ break;
+ }
+ default:
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '" << descriptor << "'";
+ return false;
+ }
+ cur_arg++;
+ }
+ if (cur_arg != expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args << " arguments, found " << cur_arg;
+ return false;
+ }
+ const char* descriptor = dex_file_->GetReturnTypeDescriptor(proto_id);
+ // Validate the return type. We don't do the type lookup; we just want to make sure it has the
+ // right format. The only major difference from the method argument format is that 'V' is
+ // supported.
+ bool result;
+ if (IsPrimitiveDescriptor(descriptor[0]) || descriptor[0] == 'V') {
+ result = descriptor[1] == '\0';
+ } else if (descriptor[0] == '[') { // single/multi-dimensional array of object/primitive
+ size_t i = 0;
+ do {
+ i++;
+ } while (descriptor[i] == '['); // process leading [
+ if (descriptor[i] == 'L') { // object array
+ do {
+ i++; // find closing ;
+ } while (descriptor[i] != ';' && descriptor[i] != '\0');
+ result = descriptor[i] == ';';
+ } else { // primitive array
+ result = IsPrimitiveDescriptor(descriptor[i]) && descriptor[i + 1] == '\0';
+ }
+ } else if (descriptor[0] == 'L') {
+ // could be more thorough here, but shouldn't be required
+ size_t i = 0;
+ do {
+ i++;
+ } while (descriptor[i] != ';' && descriptor[i] != '\0');
+ result = descriptor[i] == ';';
+ } else {
+ result = false;
+ }
+ if (!result) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected char in return type descriptor '"
+ << descriptor << "'";
+ }
+ return result;
+}
+
+bool MethodVerifier::CodeFlowVerifyMethod() {
+ const uint16_t* insns = code_item_->insns_;
+ const uint32_t insns_size = code_item_->insns_size_in_code_units_;
+
+ /* Begin by marking the first instruction as "changed". */
+ insn_flags_[0].SetChanged();
+ uint32_t start_guess = 0;
+
+ /* Continue until no instructions are marked "changed". */
+ while (true) {
+ // Find the first marked one. Use "start_guess" as a way to find one quickly.
+ uint32_t insn_idx = start_guess;
+ for (; insn_idx < insns_size; insn_idx++) {
+ if (insn_flags_[insn_idx].IsChanged())
+ break;
+ }
+ if (insn_idx == insns_size) {
+ if (start_guess != 0) {
+ /* try again, starting from the top */
+ start_guess = 0;
+ continue;
+ } else {
+ /* all flags are clear */
+ break;
+ }
+ }
+ // We carry the working set of registers from instruction to instruction. If this address can
+ // be the target of a branch (or throw) instruction, or if we're skipping around chasing
+ // "changed" flags, we need to load the set of registers from the table.
+ // Because we always prefer to continue on to the next instruction, we should never have a
+ // situation where we have a stray "changed" flag set on an instruction that isn't a branch
+ // target.
+ work_insn_idx_ = insn_idx;
+ if (insn_flags_[insn_idx].IsBranchTarget()) {
+ work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
+ } else {
+#ifndef NDEBUG
+ /*
+ * Sanity check: retrieve the stored register line (assuming
+ * a full table) and make sure it actually matches.
+ */
+ RegisterLine* register_line = reg_table_.GetLine(insn_idx);
+ if (register_line != NULL) {
+ if (work_line_->CompareLine(register_line) != 0) {
+ Dump(std::cout);
+ std::cout << info_messages_.str();
+ LOG(FATAL) << "work_line diverged in " << PrettyMethod(dex_method_idx_, *dex_file_)
+ << "@" << reinterpret_cast<void*>(work_insn_idx_) << "\n"
+ << " work_line=" << *work_line_ << "\n"
+ << " expected=" << *register_line;
+ }
+ }
+#endif
+ }
+ if (!CodeFlowVerifyInstruction(&start_guess)) {
+ std::string prepend(PrettyMethod(dex_method_idx_, *dex_file_));
+ prepend += " failed to verify: ";
+ PrependToLastFailMessage(prepend);
+ return false;
+ }
+ /* Clear "changed" and mark as visited. */
+ insn_flags_[insn_idx].SetVisited();
+ insn_flags_[insn_idx].ClearChanged();
+ }
+
+ if (gDebugVerify) {
+ /*
+ * Scan for dead code. There's nothing "evil" about dead code
+ * (besides the wasted space), but it indicates a flaw somewhere
+ * down the line, possibly in the verifier.
+ *
+ * If we've substituted "always throw" instructions into the stream,
+ * we are almost certainly going to have some dead code.
+ */
+ int dead_start = -1;
+ uint32_t insn_idx = 0;
+ for (; insn_idx < insns_size; insn_idx += insn_flags_[insn_idx].GetLengthInCodeUnits()) {
+ /*
+ * Switch-statement data doesn't get "visited" by the scanner. It
+ * may or may not be preceded by a padding NOP (for alignment).
+ */
+ if (insns[insn_idx] == Instruction::kPackedSwitchSignature ||
+ insns[insn_idx] == Instruction::kSparseSwitchSignature ||
+ insns[insn_idx] == Instruction::kArrayDataSignature ||
+ (insns[insn_idx] == Instruction::NOP && (insn_idx + 1 < insns_size) &&
+ (insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
+ insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
+ insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
+ insn_flags_[insn_idx].SetVisited();
+ }
+
+ if (!insn_flags_[insn_idx].IsVisited()) {
+ if (dead_start < 0)
+ dead_start = insn_idx;
+ } else if (dead_start >= 0) {
+ LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start) << "-" << reinterpret_cast<void*>(insn_idx - 1);
+ dead_start = -1;
+ }
+ }
+ if (dead_start >= 0) {
+ LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start) << "-" << reinterpret_cast<void*>(insn_idx - 1);
+ }
+ // To dump the state of the verify after a method, do something like:
+ // if (PrettyMethod(dex_method_idx_, *dex_file_) ==
+ // "boolean java.lang.String.equals(java.lang.Object)") {
+ // LOG(INFO) << info_messages_.str();
+ // }
+ }
+ return true;
+}
+
+bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
+ // If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
+ // We want the state _before_ the instruction, for the case where the dex pc we're
+ // interested in is itself a monitor-enter instruction (which is a likely place
+ // for a thread to be suspended).
+ if (monitor_enter_dex_pcs_ != NULL && work_insn_idx_ == interesting_dex_pc_) {
+ monitor_enter_dex_pcs_->clear(); // The new work line is more accurate than the previous one.
+ for (size_t i = 0; i < work_line_->GetMonitorEnterCount(); ++i) {
+ monitor_enter_dex_pcs_->push_back(work_line_->GetMonitorEnterDexPc(i));
+ }
+ }
+
+ /*
+ * Once we finish decoding the instruction, we need to figure out where
+ * we can go from here. There are three possible ways to transfer
+ * control to another statement:
+ *
+ * (1) Continue to the next instruction. Applies to all but
+ * unconditional branches, method returns, and exception throws.
+ * (2) Branch to one or more possible locations. Applies to branches
+ * and switch statements.
+ * (3) Exception handlers. Applies to any instruction that can
+ * throw an exception that is handled by an encompassing "try"
+ * block.
+ *
+ * We can also return, in which case there is no successor instruction
+ * from this point.
+ *
+ * The behavior can be determined from the opcode flags.
+ */
+ const uint16_t* insns = code_item_->insns_ + work_insn_idx_;
+ const Instruction* inst = Instruction::At(insns);
+ int opcode_flags = Instruction::FlagsOf(inst->Opcode());
+
+ int32_t branch_target = 0;
+ bool just_set_result = false;
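+ // "just_set_result" is set by instructions that write the method-result pseudo-register
+ // (invokes and filled-new-array); when it stays false, the result register is invalidated
+ // after the switch below.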
+ if (gDebugVerify) {
+ // Generate processing back trace to debug verifier
+ LogVerifyInfo() << "Processing " << inst->DumpString(dex_file_) << "\n"
+ << *work_line_.get() << "\n";
+ }
+
+ /*
+ * Make a copy of the previous register state. If the instruction
+ * can throw an exception, we will copy/merge this into the "catch"
+ * address rather than work_line, because we don't want the result
+ * from the "successful" code path (e.g. a check-cast that "improves"
+ * a type) to be visible to the exception handler.
+ */
+ if ((opcode_flags & Instruction::kThrow) != 0 && CurrentInsnFlags()->IsInTry()) {
+ saved_line_->CopyFromLine(work_line_.get());
+ } else {
+#ifndef NDEBUG
+ saved_line_->FillWithGarbage();
+#endif
+ }
+
+ // We need to ensure the work line is consistent while performing validation. When we spot a
+ // peephole pattern we compute a new line for either the fallthrough instruction or the
+ // branch target.
+ UniquePtr<RegisterLine> branch_line;
+ UniquePtr<RegisterLine> fallthrough_line;
+
+ switch (inst->Opcode()) {
+ case Instruction::NOP:
+ /*
+ * A "pure" NOP has no effect on anything. Data tables start with
+ * a signature that looks like a NOP; if we see one of these in
+ * the course of executing code then we have a problem.
+ */
+ if (inst->VRegA_10x() != 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "encountered data table in instruction stream";
+ }
+ break;
+
+ case Instruction::MOVE:
+ work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
+ break;
+ case Instruction::MOVE_FROM16:
+ work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
+ break;
+ case Instruction::MOVE_16:
+ work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
+ break;
+ case Instruction::MOVE_WIDE:
+ work_line_->CopyRegister2(inst->VRegA_12x(), inst->VRegB_12x());
+ break;
+ case Instruction::MOVE_WIDE_FROM16:
+ work_line_->CopyRegister2(inst->VRegA_22x(), inst->VRegB_22x());
+ break;
+ case Instruction::MOVE_WIDE_16:
+ work_line_->CopyRegister2(inst->VRegA_32x(), inst->VRegB_32x());
+ break;
+ case Instruction::MOVE_OBJECT:
+ work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
+ break;
+ case Instruction::MOVE_OBJECT_FROM16:
+ work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
+ break;
+ case Instruction::MOVE_OBJECT_16:
+ work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
+ break;
+
+ /*
+ * The move-result instructions copy data out of a "pseudo-register"
+ * with the results from the last method invocation. In practice we
+ * might want to hold the result in an actual CPU register, so the
+ * Dalvik spec requires that these only appear immediately after an
+ * invoke or filled-new-array.
+ *
+ * These calls invalidate the "result" register. (This is now
+ * redundant with the reset done below, but it can make the debug info
+ * easier to read in some cases.)
+ */
+ case Instruction::MOVE_RESULT:
+ work_line_->CopyResultRegister1(inst->VRegA_11x(), false);
+ break;
+ case Instruction::MOVE_RESULT_WIDE:
+ work_line_->CopyResultRegister2(inst->VRegA_11x());
+ break;
+ case Instruction::MOVE_RESULT_OBJECT:
+ work_line_->CopyResultRegister1(inst->VRegA_11x(), true);
+ break;
+
+ case Instruction::MOVE_EXCEPTION: {
+ /*
+ * This statement can only appear as the first instruction in an exception handler. We verify
+ * that as part of extracting the exception type from the catch block list.
+ */
+ const RegType& res_type = GetCaughtExceptionType();
+ work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
+ break;
+ }
+ case Instruction::RETURN_VOID:
+ if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+ if (!GetMethodReturnType().IsConflict()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
+ }
+ }
+ break;
+ case Instruction::RETURN:
+ if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+ /* check the method signature */
+ const RegType& return_type = GetMethodReturnType();
+ if (!return_type.IsCategory1Types()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type " << return_type;
+ } else {
+ // Compilers may generate synthetic functions that write byte values into boolean fields.
+ // They may also use integer values for boolean, byte, short, and character return types.
+ const uint32_t vregA = inst->VRegA_11x();
+ const RegType& src_type = work_line_->GetRegisterType(vregA);
+ bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
+ ((return_type.IsBoolean() || return_type.IsByte() ||
+ return_type.IsShort() || return_type.IsChar()) &&
+ src_type.IsInteger()));
+ /* check the register contents */
+ bool success =
+ work_line_->VerifyRegisterType(vregA, use_src ? src_type : return_type);
+ if (!success) {
+ AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", vregA));
+ }
+ }
+ }
+ break;
+ case Instruction::RETURN_WIDE:
+ if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+ /* check the method signature */
+ const RegType& return_type = GetMethodReturnType();
+ if (!return_type.IsCategory2Types()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
+ } else {
+ /* check the register contents */
+ const uint32_t vregA = inst->VRegA_11x();
+ bool success = work_line_->VerifyRegisterType(vregA, return_type);
+ if (!success) {
+ AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", vregA));
+ }
+ }
+ }
+ break;
+ case Instruction::RETURN_OBJECT:
+ if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+ const RegType& return_type = GetMethodReturnType();
+ if (!return_type.IsReferenceTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
+ } else {
+ /* return_type is the *expected* return type, not register value */
+ DCHECK(!return_type.IsZero());
+ DCHECK(!return_type.IsUninitializedReference());
+ const uint32_t vregA = inst->VRegA_11x();
+ const RegType& reg_type = work_line_->GetRegisterType(vregA);
+ // Disallow returning uninitialized values and verify that the reference in vAA is an
+ // instance of the "return_type"
+ if (reg_type.IsUninitializedTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "returning uninitialized object '" << reg_type << "'";
+ } else if (!return_type.IsAssignableFrom(reg_type)) {
+ Fail(reg_type.IsUnresolvedTypes() ? VERIFY_ERROR_BAD_CLASS_SOFT : VERIFY_ERROR_BAD_CLASS_HARD)
+ << "returning '" << reg_type << "', but expected from declaration '" << return_type << "'";
+ }
+ }
+ }
+ break;
+
+ /* could be boolean, int, float, or a null reference */
+ case Instruction::CONST_4: {
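+ // Sign-extend the 4-bit literal: shift it up to the top nibble, then arithmetic-shift back down.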
+ int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
+ work_line_->SetRegisterType(inst->VRegA_11n(), reg_types_.FromCat1Const(val, true));
+ break;
+ }
+ case Instruction::CONST_16: {
+ int16_t val = static_cast<int16_t>(inst->VRegB_21s());
+ work_line_->SetRegisterType(inst->VRegA_21s(), reg_types_.FromCat1Const(val, true));
+ break;
+ }
+ case Instruction::CONST:
+ work_line_->SetRegisterType(inst->VRegA_31i(),
+ reg_types_.FromCat1Const(inst->VRegB_31i(), true));
+ break;
+ case Instruction::CONST_HIGH16:
+ work_line_->SetRegisterType(inst->VRegA_21h(),
+ reg_types_.FromCat1Const(inst->VRegB_21h() << 16, true));
+ break;
+ /* could be long or double; resolved upon use */
+ case Instruction::CONST_WIDE_16: {
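+ // Wide constants occupy a register pair, so the 64-bit value is recorded as separate
+ // low and high 32-bit constant halves.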
+ int64_t val = static_cast<int16_t>(inst->VRegB_21s());
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
+ break;
+ }
+ case Instruction::CONST_WIDE_32: {
+ int64_t val = static_cast<int32_t>(inst->VRegB_31i());
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
+ break;
+ }
+ case Instruction::CONST_WIDE: {
+ int64_t val = inst->VRegB_51l();
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
+ break;
+ }
+ case Instruction::CONST_WIDE_HIGH16: {
+ int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
+ const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
+ break;
+ }
+ case Instruction::CONST_STRING:
+ work_line_->SetRegisterType(inst->VRegA_21c(), reg_types_.JavaLangString());
+ break;
+ case Instruction::CONST_STRING_JUMBO:
+ work_line_->SetRegisterType(inst->VRegA_31c(), reg_types_.JavaLangString());
+ break;
+ case Instruction::CONST_CLASS: {
+ // Get the type from the instruction; if it is unresolved then we need an access check.
+ // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ // The register holds a class, i.e. its type is Class; on error it will hold Conflict.
+ work_line_->SetRegisterType(inst->VRegA_21c(),
+ res_type.IsConflict() ? res_type
+ : reg_types_.JavaLangClass(true));
+ break;
+ }
+ case Instruction::MONITOR_ENTER:
+ work_line_->PushMonitor(inst->VRegA_11x(), work_insn_idx_);
+ break;
+ case Instruction::MONITOR_EXIT:
+ /*
+ * monitor-exit instructions are odd. They can throw exceptions,
+ * but when they do they act as if they succeeded and the PC is
+ * pointing to the following instruction. (This behavior goes back
+ * to the need to handle asynchronous exceptions, a now-deprecated
+ * feature that Dalvik doesn't support.)
+ *
+ * In practice we don't need to worry about this. The only
+ * exceptions that can be thrown from monitor-exit are for a
+ * null reference and -exit without a matching -enter. If the
+ * structured locking checks are working, the former would have
+ * failed on the -enter instruction, and the latter is impossible.
+ *
+ * This is fortunate, because issue 3221411 prevents us from
+ * chasing the "can throw" path when monitor verification is
+ * enabled. If we can fully verify the locking we can ignore
+ * some catch blocks (which will show up as "dead" code when
+ * we skip them here); if we can't, then the code path could be
+ * "live" so we still need to check it.
+ */
+ opcode_flags &= ~Instruction::kThrow;
+ work_line_->PopMonitor(inst->VRegA_11x());
+ break;
+
+ case Instruction::CHECK_CAST:
+ case Instruction::INSTANCE_OF: {
+ /*
+ * If this instruction succeeds, we will "downcast" register vA to the type in vB. (This
+ * could be a "upcast" -- not expected, so we don't try to address it.)
+ *
+ * If it fails, an exception is thrown, which we deal with later by ignoring the update to
+ * dec_insn.vA when branching to a handler.
+ */
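+ // check-cast is a 21c instruction (vAA, type@BBBB) while instance-of is 22c
+ // (vA, vB, type@CCCC), hence the different register accessors below.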
+ const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
+ const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ if (res_type.IsConflict()) {
+ DCHECK_NE(failures_.size(), 0U);
+ if (!is_checkcast) {
+ work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+ }
+ break; // bad class
+ }
+ // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
+ uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
+ const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+ if (!res_type.IsNonZeroReferenceTypes()) {
+ if (is_checkcast) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on unexpected class " << res_type;
+ }
+ } else if (!orig_type.IsReferenceTypes()) {
+ if (is_checkcast) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on non-reference in v" << orig_type_reg;
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance-of on non-reference in v" << orig_type_reg;
+ }
+ } else {
+ if (is_checkcast) {
+ work_line_->SetRegisterType(inst->VRegA_21c(), res_type);
+ } else {
+ work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+ }
+ }
+ break;
+ }
+ case Instruction::ARRAY_LENGTH: {
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+ if (res_type.IsReferenceTypes()) {
+ if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // i.e. not an array or null
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
+ } else {
+ work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
+ }
+ }
+ break;
+ }
+ case Instruction::NEW_INSTANCE: {
+ const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ if (res_type.IsConflict()) {
+ DCHECK_NE(failures_.size(), 0U);
+ break; // bad class
+ }
+ // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
+ // Can't create an instance of an interface or abstract class.
+ if (!res_type.IsInstantiableTypes()) {
+ Fail(VERIFY_ERROR_INSTANTIATION)
+ << "new-instance on primitive, interface or abstract class" << res_type;
+ // Soft failure so carry on to set register type.
+ }
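+ // The uninitialized type is keyed by the allocation's dex pc, keeping references from
+ // distinct new-instance sites apart until their constructors run.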
+ const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+ // Any registers holding previous allocations from this address that have not yet been
+ // initialized must be marked invalid.
+ work_line_->MarkUninitRefsAsInvalid(uninit_type);
+ // add the new uninitialized reference to the register state
+ work_line_->SetRegisterType(inst->VRegA_21c(), uninit_type);
+ break;
+ }
+ case Instruction::NEW_ARRAY:
+ VerifyNewArray(inst, false, false);
+ break;
+ case Instruction::FILLED_NEW_ARRAY:
+ VerifyNewArray(inst, true, false);
+ just_set_result = true; // Filled new array sets result register
+ break;
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ VerifyNewArray(inst, true, true);
+ just_set_result = true; // Filled new array range sets result register
+ break;
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPG_FLOAT:
+ if (!work_line_->VerifyRegisterType(inst->VRegB_23x(), reg_types_.Float())) {
+ break;
+ }
+ if (!work_line_->VerifyRegisterType(inst->VRegC_23x(), reg_types_.Float())) {
+ break;
+ }
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+ break;
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.DoubleLo(),
+ reg_types_.DoubleHi())) {
+ break;
+ }
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.DoubleLo(),
+ reg_types_.DoubleHi())) {
+ break;
+ }
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+ break;
+ case Instruction::CMP_LONG:
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.LongLo(),
+ reg_types_.LongHi())) {
+ break;
+ }
+ if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.LongLo(),
+ reg_types_.LongHi())) {
+ break;
+ }
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+ break;
+ case Instruction::THROW: {
+ const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+ if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type << " not instanceof Throwable";
+ }
+ break;
+ }
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ /* no effect on or use of registers */
+ break;
+
+ case Instruction::PACKED_SWITCH:
+ case Instruction::SPARSE_SWITCH:
+ /* verify that vAA is an integer, or can be converted to one */
+ work_line_->VerifyRegisterType(inst->VRegA_31t(), reg_types_.Integer());
+ break;
+
+ case Instruction::FILL_ARRAY_DATA: {
+ /* Similar to the verification done for APUT */
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+ /* array_type can be null if the reg type is Zero */
+ if (!array_type.IsZero()) {
+ if (!array_type.IsArrayTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type " << array_type;
+ } else {
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ DCHECK(!component_type.IsConflict());
+ if (component_type.IsNonZeroReferenceTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
+ << component_type;
+ } else {
+ // Now verify if the element width in the table matches the element width declared in
+ // the array
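+ // The payload offset is a signed 32-bit value stored in the two code units following the opcode.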
+ const uint16_t* array_data = insns + (insns[1] | (((int32_t) insns[2]) << 16));
+ if (array_data[0] != Instruction::kArrayDataSignature) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid magic for array-data";
+ } else {
+ size_t elem_width = Primitive::ComponentSize(component_type.GetPrimitiveType());
+ // Since we don't compress the data in Dex, expect to see equal width of data stored
+ // in the table and expected from the array class.
+ if (array_data[1] != elem_width) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-data size mismatch (" << array_data[1]
+ << " vs " << elem_width << ")";
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE: {
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ bool mismatch = false;
+ if (reg_type1.IsZero()) { // zero then integral or reference expected
+ mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
+ } else if (reg_type1.IsReferenceTypes()) { // both references?
+ mismatch = !reg_type2.IsReferenceTypes();
+ } else { // both integral?
+ mismatch = !reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes();
+ }
+ if (mismatch) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to if-eq/if-ne (" << reg_type1 << "," << reg_type2
+ << ") must both be references or integral";
+ }
+ break;
+ }
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE: {
+ const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
+ << reg_type2 << ") must be integral";
+ }
+ break;
+ }
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ: {
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type << " unexpected as arg to if-eqz/if-nez";
+ }
+
+ // Find the previous instruction; its existence is a precondition to the peephole optimization below.
+ uint32_t instance_of_idx = 0;
+ if (0 != work_insn_idx_) {
+ instance_of_idx = work_insn_idx_ - 1;
+ while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ instance_of_idx--;
+ }
+ CHECK(insn_flags_[instance_of_idx].IsOpcode());
+ } else {
+ break;
+ }
+
+ const Instruction* instance_of_inst = Instruction::At(code_item_->insns_ + instance_of_idx);
+
+ /* Check for the peephole pattern:
+ * ...;
+ * instance-of vX, vY, T;
+ * ifXXX vX, label;
+ * ...;
+ * label:
+ * ...;
+ * and sharpen the type of vY to be type T.
+ * Note, this pattern can't be applied:
+ * - if there are other branches to the ifXXX instruction,
+ * - when vX == vY.
+ */
+ if (!CurrentInsnFlags()->IsBranchTarget() &&
+ (Instruction::INSTANCE_OF == instance_of_inst->Opcode()) &&
+ (inst->VRegA_21t() == instance_of_inst->VRegA_22c()) &&
+ (instance_of_inst->VRegA_22c() != instance_of_inst->VRegB_22c())) {
+ // Check that we are not attempting a conversion to an interface type,
+ // which is not done because of the multiple-inheritance implications.
+ const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+
+ if (!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
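+ // update_line's ownership passes to fallthrough_line or branch_line below; the merge
+ // code at the end of this function applies it to the appropriate successor.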
+ RegisterLine* update_line = new RegisterLine(code_item_->registers_size_, this);
+ if (inst->Opcode() == Instruction::IF_EQZ) {
+ fallthrough_line.reset(update_line);
+ } else {
+ branch_line.reset(update_line);
+ }
+ update_line->CopyFromLine(work_line_.get());
+ update_line->SetRegisterType(instance_of_inst->VRegB_22c(), cast_type);
+ if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+ // See if instance-of was preceded by a move-object operation, common due to the small
+ // register encoding space of instance-of, and propagate type information to the source
+ // of the move-object.
+ uint32_t move_idx = instance_of_idx - 1;
+ while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ move_idx--;
+ }
+ CHECK(insn_flags_[move_idx].IsOpcode());
+ const Instruction* move_inst = Instruction::At(code_item_->insns_ + move_idx);
+ switch (move_inst->Opcode()) {
+ case Instruction::MOVE_OBJECT:
+ if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_12x(), cast_type);
+ }
+ break;
+ case Instruction::MOVE_OBJECT_FROM16:
+ if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_22x(), cast_type);
+ }
+ break;
+ case Instruction::MOVE_OBJECT_16:
+ if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
+ update_line->SetRegisterType(move_inst->VRegB_32x(), cast_type);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ break;
+ }
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ: {
+ const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ if (!reg_type.IsIntegralTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
+ << " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
+ }
+ break;
+ }
+ case Instruction::AGET_BOOLEAN:
+ VerifyAGet(inst, reg_types_.Boolean(), true);
+ break;
+ case Instruction::AGET_BYTE:
+ VerifyAGet(inst, reg_types_.Byte(), true);
+ break;
+ case Instruction::AGET_CHAR:
+ VerifyAGet(inst, reg_types_.Char(), true);
+ break;
+ case Instruction::AGET_SHORT:
+ VerifyAGet(inst, reg_types_.Short(), true);
+ break;
+ case Instruction::AGET:
+ VerifyAGet(inst, reg_types_.Integer(), true);
+ break;
+ case Instruction::AGET_WIDE:
+ VerifyAGet(inst, reg_types_.LongLo(), true);
+ break;
+ case Instruction::AGET_OBJECT:
+ VerifyAGet(inst, reg_types_.JavaLangObject(false), false);
+ break;
+
+ case Instruction::APUT_BOOLEAN:
+ VerifyAPut(inst, reg_types_.Boolean(), true);
+ break;
+ case Instruction::APUT_BYTE:
+ VerifyAPut(inst, reg_types_.Byte(), true);
+ break;
+ case Instruction::APUT_CHAR:
+ VerifyAPut(inst, reg_types_.Char(), true);
+ break;
+ case Instruction::APUT_SHORT:
+ VerifyAPut(inst, reg_types_.Short(), true);
+ break;
+ case Instruction::APUT:
+ VerifyAPut(inst, reg_types_.Integer(), true);
+ break;
+ case Instruction::APUT_WIDE:
+ VerifyAPut(inst, reg_types_.LongLo(), true);
+ break;
+ case Instruction::APUT_OBJECT:
+ VerifyAPut(inst, reg_types_.JavaLangObject(false), false);
+ break;
+
+ case Instruction::IGET_BOOLEAN:
+ VerifyISGet(inst, reg_types_.Boolean(), true, false);
+ break;
+ case Instruction::IGET_BYTE:
+ VerifyISGet(inst, reg_types_.Byte(), true, false);
+ break;
+ case Instruction::IGET_CHAR:
+ VerifyISGet(inst, reg_types_.Char(), true, false);
+ break;
+ case Instruction::IGET_SHORT:
+ VerifyISGet(inst, reg_types_.Short(), true, false);
+ break;
+ case Instruction::IGET:
+ VerifyISGet(inst, reg_types_.Integer(), true, false);
+ break;
+ case Instruction::IGET_WIDE:
+ VerifyISGet(inst, reg_types_.LongLo(), true, false);
+ break;
+ case Instruction::IGET_OBJECT:
+ VerifyISGet(inst, reg_types_.JavaLangObject(false), false, false);
+ break;
+
+ case Instruction::IPUT_BOOLEAN:
+ VerifyISPut(inst, reg_types_.Boolean(), true, false);
+ break;
+ case Instruction::IPUT_BYTE:
+ VerifyISPut(inst, reg_types_.Byte(), true, false);
+ break;
+ case Instruction::IPUT_CHAR:
+ VerifyISPut(inst, reg_types_.Char(), true, false);
+ break;
+ case Instruction::IPUT_SHORT:
+ VerifyISPut(inst, reg_types_.Short(), true, false);
+ break;
+ case Instruction::IPUT:
+ VerifyISPut(inst, reg_types_.Integer(), true, false);
+ break;
+ case Instruction::IPUT_WIDE:
+ VerifyISPut(inst, reg_types_.LongLo(), true, false);
+ break;
+ case Instruction::IPUT_OBJECT:
+ VerifyISPut(inst, reg_types_.JavaLangObject(false), false, false);
+ break;
+
+ case Instruction::SGET_BOOLEAN:
+ VerifyISGet(inst, reg_types_.Boolean(), true, true);
+ break;
+ case Instruction::SGET_BYTE:
+ VerifyISGet(inst, reg_types_.Byte(), true, true);
+ break;
+ case Instruction::SGET_CHAR:
+ VerifyISGet(inst, reg_types_.Char(), true, true);
+ break;
+ case Instruction::SGET_SHORT:
+ VerifyISGet(inst, reg_types_.Short(), true, true);
+ break;
+ case Instruction::SGET:
+ VerifyISGet(inst, reg_types_.Integer(), true, true);
+ break;
+ case Instruction::SGET_WIDE:
+ VerifyISGet(inst, reg_types_.LongLo(), true, true);
+ break;
+ case Instruction::SGET_OBJECT:
+ VerifyISGet(inst, reg_types_.JavaLangObject(false), false, true);
+ break;
+
+ case Instruction::SPUT_BOOLEAN:
+ VerifyISPut(inst, reg_types_.Boolean(), true, true);
+ break;
+ case Instruction::SPUT_BYTE:
+ VerifyISPut(inst, reg_types_.Byte(), true, true);
+ break;
+ case Instruction::SPUT_CHAR:
+ VerifyISPut(inst, reg_types_.Char(), true, true);
+ break;
+ case Instruction::SPUT_SHORT:
+ VerifyISPut(inst, reg_types_.Short(), true, true);
+ break;
+ case Instruction::SPUT:
+ VerifyISPut(inst, reg_types_.Integer(), true, true);
+ break;
+ case Instruction::SPUT_WIDE:
+ VerifyISPut(inst, reg_types_.LongLo(), true, true);
+ break;
+ case Instruction::SPUT_OBJECT:
+ VerifyISPut(inst, reg_types_.JavaLangObject(false), false, true);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_SUPER_RANGE: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
+ inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
+ bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER ||
+ inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL,
+ is_range, is_super);
+ const char* descriptor;
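+ // If the called method could not be resolved, recover the declared return type straight
+ // from the dex file so verification of the result register can continue.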
+ if (called_method == NULL) {
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ } else {
+ descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
+ }
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ just_set_result = true;
+ break;
+ }
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_DIRECT,
+ is_range, false);
+ const char* return_type_descriptor;
+ bool is_constructor;
+ if (called_method == NULL) {
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ is_constructor = StringPiece(dex_file_->GetMethodName(method_id)) == "<init>";
+ uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+ return_type_descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ } else {
+ is_constructor = called_method->IsConstructor();
+ return_type_descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
+ }
+ if (is_constructor) {
+ /*
+ * Some additional checks when calling a constructor. We know from the invocation arg check
+ * that the "this" argument is an instance of called_method->klass. Now we further restrict
+ * that to require that called_method->klass is the same as this->klass or this->super,
+ * allowing the latter only if the "this" argument is the same as the "this" argument to
+ * this method (which implies that we're in a constructor ourselves).
+ */
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ if (this_type.IsConflict()) // failure.
+ break;
+
+ /* no null refs allowed (?) */
+ if (this_type.IsZero()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unable to initialize null ref";
+ break;
+ }
+
+ /* must be in same class or in superclass */
+ // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+ // TODO: re-enable constructor type verification
+ // if (this_super_klass.IsConflict()) {
+ // Unknown super class, fail so we re-check at runtime.
+ // Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "super class unknown for '" << this_type << "'";
+ // break;
+ // }
+
+ /* arg must be an uninitialized reference */
+ if (!this_type.IsUninitializedTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected initialization on uninitialized reference "
+ << this_type;
+ break;
+ }
+
+ /*
+ * Replace the uninitialized reference with an initialized one. We need to do this for all
+ * registers that have the same object instance in them, not just the "this" register.
+ */
+ work_line_->MarkRefsAsInitialized(this_type);
+ }
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_, return_type_descriptor,
+ false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ just_set_result = true;
+ break;
+ }
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_STATIC, is_range, false);
+ const char* descriptor;
+ if (called_method == NULL) {
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ } else {
+ descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
+ }
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ just_set_result = true;
+ }
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst, METHOD_INTERFACE, is_range, false);
+ if (abs_method != NULL) {
+ mirror::Class* called_interface = abs_method->GetDeclaringClass();
+ if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
+ Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected interface class in invoke-interface '"
+ << PrettyMethod(abs_method) << "'";
+ break;
+ }
+ }
+ /* Get the type of the "this" arg, which should either be a sub-interface of called
+ * interface or Object (see comments in RegType::JoinClass).
+ */
+ const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ if (this_type.IsZero()) {
+ /* null pointer always passes (and always fails at runtime) */
+ } else {
+ if (this_type.IsUninitializedTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface call on uninitialized object "
+ << this_type;
+ break;
+ }
+ // In the past we have tried to assert that "called_interface" is assignable
+ // from "this_type.GetClass()", however, as we do an imprecise Join
+ // (RegType::JoinClass) we don't have full information on what interfaces are
+ // implemented by "this_type". For example, two classes may implement the same
+ // interfaces and have a common parent that doesn't implement the interface. The
+ // join will set "this_type" to the parent class and a test that this implements
+ // the interface will incorrectly fail.
+ }
+ /*
+ * We don't have an object instance, so we can't find the concrete method. However, all of
+ * the type information is in the abstract method, so we're good.
+ */
+ const char* descriptor;
+ if (abs_method == NULL) {
+ uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ } else {
+ descriptor = MethodHelper(abs_method).GetReturnTypeDescriptor();
+ }
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ just_set_result = true;
+ break;
+ }
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Integer());
+ break;
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::NEG_FLOAT:
+ work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Float());
+ break;
+ case Instruction::NEG_DOUBLE:
+ work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::INT_TO_LONG:
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.Integer());
+ break;
+ case Instruction::INT_TO_FLOAT:
+ work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Integer());
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.Integer());
+ break;
+ case Instruction::LONG_TO_INT:
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::LONG_TO_FLOAT:
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::FLOAT_TO_INT:
+ work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Float());
+ break;
+ case Instruction::FLOAT_TO_LONG:
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.Float());
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.Float());
+ break;
+ case Instruction::DOUBLE_TO_INT:
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::DOUBLE_TO_LONG:
+ work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::INT_TO_BYTE:
+ work_line_->CheckUnaryOp(inst, reg_types_.Byte(), reg_types_.Integer());
+ break;
+ case Instruction::INT_TO_CHAR:
+ work_line_->CheckUnaryOp(inst, reg_types_.Char(), reg_types_.Integer());
+ break;
+ case Instruction::INT_TO_SHORT:
+ work_line_->CheckUnaryOp(inst, reg_types_.Short(), reg_types_.Integer());
+ break;
+
+ case Instruction::ADD_INT:
+ case Instruction::SUB_INT:
+ case Instruction::MUL_INT:
+ case Instruction::REM_INT:
+ case Instruction::DIV_INT:
+ case Instruction::SHL_INT:
+ case Instruction::SHR_INT:
+ case Instruction::USHR_INT:
+ work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+ reg_types_.Integer(), false);
+ break;
+ case Instruction::AND_INT:
+ case Instruction::OR_INT:
+ case Instruction::XOR_INT:
+ work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+ reg_types_.Integer(), true);
+ break;
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::MUL_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ work_line_->CheckBinaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ /* shift distance is Int, making these different from other binary operations */
+ work_line_->CheckBinaryOpWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.Integer());
+ break;
+ case Instruction::ADD_FLOAT:
+ case Instruction::SUB_FLOAT:
+ case Instruction::MUL_FLOAT:
+ case Instruction::DIV_FLOAT:
+ case Instruction::REM_FLOAT:
+ work_line_->CheckBinaryOp(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ break;
+ case Instruction::ADD_DOUBLE:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::REM_DOUBLE:
+ work_line_->CheckBinaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT_2ADDR:
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ break;
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT_2ADDR:
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), true);
+ break;
+ case Instruction::DIV_INT_2ADDR:
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ break;
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ work_line_->CheckBinaryOp2addrWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.LongLo(), reg_types_.LongHi());
+ break;
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR:
+ work_line_->CheckBinaryOp2addrWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+ reg_types_.Integer());
+ break;
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT_2ADDR:
+ work_line_->CheckBinaryOp2addr(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ break;
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE_2ADDR:
+ work_line_->CheckBinaryOp2addrWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+ reg_types_.DoubleLo(), reg_types_.DoubleHi());
+ break;
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::RSUB_INT:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, true);
+ break;
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, true);
+ break;
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8:
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, false);
+ break;
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, false);
+ break;
+
+ // Special instructions.
+ case Instruction::RETURN_VOID_BARRIER:
+ DCHECK(Runtime::Current()->IsStarted());
+ if (!IsConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-barrier not expected";
+ }
+ break;
+ // Note: the following instructions encode offsets derived from class linking.
+ // As such they use Class*/Field*/AbstractMethod* as these offsets only have
+ // meaning if the class linking and resolution were successful.
+ case Instruction::IGET_QUICK:
+ VerifyIGetQuick(inst, reg_types_.Integer(), true);
+ break;
+ case Instruction::IGET_WIDE_QUICK:
+ VerifyIGetQuick(inst, reg_types_.LongLo(), true);
+ break;
+ case Instruction::IGET_OBJECT_QUICK:
+ VerifyIGetQuick(inst, reg_types_.JavaLangObject(false), false);
+ break;
+ case Instruction::IPUT_QUICK:
+ VerifyIPutQuick(inst, reg_types_.Integer(), true);
+ break;
+ case Instruction::IPUT_WIDE_QUICK:
+ VerifyIPutQuick(inst, reg_types_.LongLo(), true);
+ break;
+ case Instruction::IPUT_OBJECT_QUICK:
+ VerifyIPutQuick(inst, reg_types_.JavaLangObject(false), false);
+ break;
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
+ mirror::AbstractMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
+ if (called_method != NULL) {
+ const char* descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (!return_type.IsLowHalf()) {
+ work_line_->SetResultRegisterType(return_type);
+ } else {
+ work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
+ }
+ just_set_result = true;
+ }
+ break;
+ }
+
+ /* These should never appear during verification. */
+ case Instruction::UNUSED_3E:
+ case Instruction::UNUSED_3F:
+ case Instruction::UNUSED_40:
+ case Instruction::UNUSED_41:
+ case Instruction::UNUSED_42:
+ case Instruction::UNUSED_43:
+ case Instruction::UNUSED_79:
+ case Instruction::UNUSED_7A:
+ case Instruction::UNUSED_EB:
+ case Instruction::UNUSED_EC:
+ case Instruction::UNUSED_ED:
+ case Instruction::UNUSED_EE:
+ case Instruction::UNUSED_EF:
+ case Instruction::UNUSED_F0:
+ case Instruction::UNUSED_F1:
+ case Instruction::UNUSED_F2:
+ case Instruction::UNUSED_F3:
+ case Instruction::UNUSED_F4:
+ case Instruction::UNUSED_F5:
+ case Instruction::UNUSED_F6:
+ case Instruction::UNUSED_F7:
+ case Instruction::UNUSED_F8:
+ case Instruction::UNUSED_F9:
+ case Instruction::UNUSED_FA:
+ case Instruction::UNUSED_FB:
+ case Instruction::UNUSED_FC:
+ case Instruction::UNUSED_FD:
+ case Instruction::UNUSED_FE:
+ case Instruction::UNUSED_FF:
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
+ break;
+
+ /*
+ * DO NOT add a "default" clause here. Without it the compiler will
+ * complain if an instruction is missing (which is desirable).
+ */
+ } // end - switch (dec_insn.opcode)
+
+ if (have_pending_hard_failure_) {
+ if (Runtime::Current()->IsCompiler()) {
+ /* When compiling, check that the last failure is a hard failure */
+ CHECK_EQ(failures_[failures_.size() - 1], VERIFY_ERROR_BAD_CLASS_HARD);
+ }
+ /* immediate failure, reject class */
+ info_messages_ << "Rejecting opcode " << inst->DumpString(dex_file_);
+ return false;
+ } else if (have_pending_runtime_throw_failure_) {
+ /* slow path will throw, mark following code as unreachable */
+ opcode_flags = Instruction::kThrow;
+ }
+ /*
+ * If we didn't just set the result register, clear it out. This ensures that you can only use
+ * "move-result" immediately after the result is set. (We could check this statically, but it's
+ * not expensive and it makes our debugging output cleaner.)
+ */
+ if (!just_set_result) {
+ work_line_->SetResultTypeToUnknown();
+ }
+
+ /*
+ * Handle "branch". Tag the branch target.
+ *
+ * NOTE: instructions like Instruction::IF_EQZ provide information about the
+ * state of the register when the branch is taken or not taken. For example,
+ * somebody could get a reference field, check it for zero, and if the
+ * branch is taken immediately store that register in a boolean field
+ * since the value is known to be zero. We do not currently account for
+ * that, and will reject the code.
+ *
+ * TODO: avoid re-fetching the branch target
+ */
+ if ((opcode_flags & Instruction::kBranch) != 0) {
+ bool isConditional, selfOkay;
+ if (!GetBranchOffset(work_insn_idx_, &branch_target, &isConditional, &selfOkay)) {
+ /* should never happen after static verification */
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad branch";
+ return false;
+ }
+ DCHECK_EQ(isConditional, (opcode_flags & Instruction::kContinue) != 0);
+ if (!CheckNotMoveException(code_item_->insns_, work_insn_idx_ + branch_target)) {
+ return false;
+ }
+ /* update branch target, set "changed" if appropriate */
+ if (NULL != branch_line.get()) {
+ if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get())) {
+ return false;
+ }
+ } else {
+ if (!UpdateRegisters(work_insn_idx_ + branch_target, work_line_.get())) {
+ return false;
+ }
+ }
+ }
+
+ /*
+ * Handle "switch". Tag all possible branch targets.
+ *
+ * We've already verified that the table is structurally sound, so we
+ * just need to walk through and tag the targets.
+ */
+ if ((opcode_flags & Instruction::kSwitch) != 0) {
+ int offset_to_switch = insns[1] | (((int32_t) insns[2]) << 16);
+ const uint16_t* switch_insns = insns + offset_to_switch;
+ int switch_count = switch_insns[1];
+ int offset_to_targets, targ;
+
+ if ((*insns & 0xff) == Instruction::PACKED_SWITCH) {
+ /* 0 = sig, 1 = count, 2/3 = first key */
+ offset_to_targets = 4;
+ } else {
+ /* 0 = sig, 1 = count, 2..count * 2 = keys */
+ DCHECK((*insns & 0xff) == Instruction::SPARSE_SWITCH);
+ offset_to_targets = 2 + 2 * switch_count;
+ }
+
+ /* verify each switch target */
+ for (targ = 0; targ < switch_count; targ++) {
+ int offset;
+ uint32_t abs_offset;
+
+ /* offsets are 32-bit, and only partly endian-swapped */
+ offset = switch_insns[offset_to_targets + targ * 2] |
+ (((int32_t) switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
+ abs_offset = work_insn_idx_ + offset;
+ DCHECK_LT(abs_offset, code_item_->insns_size_in_code_units_);
+ if (!CheckNotMoveException(code_item_->insns_, abs_offset)) {
+ return false;
+ }
+ if (!UpdateRegisters(abs_offset, work_line_.get()))
+ return false;
+ }
+ }
+
+ /*
+ * Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
+ * "try" block when they throw, control transfers out of the method.)
+ */
+ if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+ bool within_catch_all = false;
+ CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
+
+ for (; iterator.HasNext(); iterator.Next()) {
+ if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
+ within_catch_all = true;
+ }
+ /*
+ * Merge registers into the "catch" block. We want to use the "savedRegs" rather than
+ * "work_regs", because at runtime the exception will be thrown before the instruction
+ * modifies any registers.
+ */
+ if (!UpdateRegisters(iterator.GetHandlerAddress(), saved_line_.get())) {
+ return false;
+ }
+ }
+
+ /*
+ * If the monitor stack depth is nonzero, there must be a "catch all" handler for this
+ * instruction. This does apply to monitor-exit because of async exception handling.
+ */
+ if (work_line_->MonitorStackDepth() > 0 && !within_catch_all) {
+ /*
+ * The state in work_line reflects the post-execution state. If the current instruction is a
+ * monitor-enter and the monitor stack was empty, we don't need a catch-all (if it throws,
+ * it will do so before grabbing the lock).
+ */
+ if (inst->Opcode() != Instruction::MONITOR_ENTER || work_line_->MonitorStackDepth() != 1) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "expected to be within a catch-all for an instruction where a monitor is held";
+ return false;
+ }
+ }
+ }
+
+ /* Handle "continue". Tag the next consecutive instruction.
+ * Note: Keep the code handling "continue" case below the "branch" and "switch" cases,
+ * because it changes work_line_ when performing peephole optimization
+ * and this change should not be used in those cases.
+ */
+ if ((opcode_flags & Instruction::kContinue) != 0) {
+ uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
+ if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
+ return false;
+ }
+ // The only way to get to a move-exception instruction is to get thrown there. Make sure the
+ // next instruction isn't one.
+ if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
+ return false;
+ }
+ if (NULL != fallthrough_line.get()) {
+ // Make workline consistent with fallthrough computed from peephole optimization.
+ work_line_->CopyFromLine(fallthrough_line.get());
+ }
+ RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
+ if (next_line != NULL) {
+ // Merge registers into what we have for the next instruction,
+ // and set the "changed" flag if needed.
+ if (!UpdateRegisters(next_insn_idx, work_line_.get())) {
+ return false;
+ }
+ } else {
+ /*
+ * We're not recording register data for the next instruction, so we don't know what the
+ * prior state was. We have to assume that something has changed and re-evaluate it.
+ */
+ insn_flags_[next_insn_idx].SetChanged();
+ }
+ }
+
+ /* If we're returning from the method, make sure monitor stack is empty. */
+ if ((opcode_flags & Instruction::kReturn) != 0) {
+ if (!work_line_->VerifyMonitorStackEmpty()) {
+ return false;
+ }
+ }
+
+ /*
+ * Update start_guess. Advance to the next instruction if that's
+ * possible, otherwise use the branch target if one was found. If
+ * neither of those exists we're in a return or throw; leave start_guess
+ * alone and let the caller sort it out.
+ */
+ if ((opcode_flags & Instruction::kContinue) != 0) {
+ *start_guess = work_insn_idx_ + insn_flags_[work_insn_idx_].GetLengthInCodeUnits();
+ } else if ((opcode_flags & Instruction::kBranch) != 0) {
+ /* we're still okay if branch_target is zero */
+ *start_guess = work_insn_idx_ + branch_target;
+ }
+
+ DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
+ DCHECK(insn_flags_[*start_guess].IsOpcode());
+
+ return true;
+}
+
+const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ const RegType& referrer = GetDeclaringClass();
+ mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+ const RegType& result =
+ klass != NULL ? reg_types_.FromClass(descriptor, klass,
+ klass->CannotBeAssignedFromOtherTypes())
+ : reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ if (result.IsConflict()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+ << "' in " << referrer;
+ return result;
+ }
+ if (klass == NULL && !result.IsUnresolvedTypes()) {
+ dex_cache_->SetResolvedType(class_idx, result.GetClass());
+ }
+ // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
+ // check at runtime if access is allowed and so pass here.
+ if (!result.IsUnresolvedTypes() && !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
+ Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+ << referrer << "' -> '" << result << "'";
+ }
+ return result;
+}
+
+const RegType& MethodVerifier::GetCaughtExceptionType() {
+ const RegType* common_super = NULL;
+ if (code_item_->tries_size_ != 0) {
+ const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ for (uint32_t i = 0; i < handlers_size; i++) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ if (iterator.GetHandlerAddress() == (uint32_t) work_insn_idx_) {
+ if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
+ common_super = &reg_types_.JavaLangThrowable(false);
+ } else {
+ const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+ if (common_super == NULL) {
+          // Unconditionally assign for the first handler. We don't assert that it is a Throwable
+          // as that is caught at runtime.
+ common_super = &exception;
+ } else if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
+ // We don't know enough about the type and the common path merge will result in
+ // Conflict. Fail here knowing the correct thing can be done at runtime.
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unexpected non-exception class " << exception;
+ return reg_types_.Conflict();
+ } else if (common_super->Equals(exception)) {
+ // odd case, but nothing to do
+ } else {
+ common_super = &common_super->Merge(exception, &reg_types_);
+ CHECK(reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super));
+ }
+ }
+ }
+ }
+ handlers_ptr = iterator.EndDataPointer();
+ }
+ }
+ if (common_super == NULL) {
+ /* no catch blocks, or no catches with classes we can find */
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unable to find exception handler";
+ return reg_types_.Conflict();
+ }
+ return *common_super;
+}
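+
+// Illustrative example with a hypothetical handler layout: if two handlers covering this
+// dex pc catch Ljava/io/FileNotFoundException; and Ljava/net/SocketException;, the merge
+// above joins them in the class hierarchy and common_super ends up as
+// Ljava/io/IOException;. A handler with no type index (a catch-all) pins the result to
+// java.lang.Throwable directly.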
+
+mirror::AbstractMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
+ MethodType method_type) {
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
+ const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+ if (klass_type.IsConflict()) {
+ std::string append(" in attempt to access method ");
+ append += dex_file_->GetMethodName(method_id);
+ AppendToLastFailMessage(append);
+ return NULL;
+ }
+ if (klass_type.IsUnresolvedTypes()) {
+ return NULL; // Can't resolve Class so no more to do here
+ }
+ mirror::Class* klass = klass_type.GetClass();
+ const RegType& referrer = GetDeclaringClass();
+ mirror::AbstractMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
+ if (res_method == NULL) {
+ const char* name = dex_file_->GetMethodName(method_id);
+ std::string signature(dex_file_->CreateMethodSignature(method_id.proto_idx_, NULL));
+
+ if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) {
+ res_method = klass->FindDirectMethod(name, signature);
+ } else if (method_type == METHOD_INTERFACE) {
+ res_method = klass->FindInterfaceMethod(name, signature);
+ } else {
+ res_method = klass->FindVirtualMethod(name, signature);
+ }
+ if (res_method != NULL) {
+ dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
+ } else {
+ // If a virtual or interface method wasn't found with the expected type, look in
+ // the direct methods. This can happen when the wrong invoke type is used or when
+ // a class has changed, and will be flagged as an error in later checks.
+ if (method_type == METHOD_INTERFACE || method_type == METHOD_VIRTUAL) {
+ res_method = klass->FindDirectMethod(name, signature);
+ }
+ if (res_method == NULL) {
+ Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method "
+ << PrettyDescriptor(klass) << "." << name
+ << " " << signature;
+ return NULL;
+ }
+ }
+ }
+ // Make sure calls to constructors are "direct". There are additional restrictions but we don't
+ // enforce them here.
+ if (res_method->IsConstructor() && method_type != METHOD_DIRECT) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting non-direct call to constructor "
+ << PrettyMethod(res_method);
+ return NULL;
+ }
+ // Disallow any calls to class initializers.
+ if (MethodHelper(res_method).IsClassInitializer()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting call to class initializer "
+ << PrettyMethod(res_method);
+ return NULL;
+ }
+ // Check if access is allowed.
+ if (!referrer.CanAccessMember(res_method->GetDeclaringClass(), res_method->GetAccessFlags())) {
+ Fail(VERIFY_ERROR_ACCESS_METHOD) << "illegal method access (call " << PrettyMethod(res_method)
+ << " from " << referrer << ")";
+ return res_method;
+ }
+ // Check that invoke-virtual and invoke-super are not used on private methods of the same class.
+ if (res_method->IsPrivate() && method_type == METHOD_VIRTUAL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke-super/virtual can't be used on private method "
+ << PrettyMethod(res_method);
+ return NULL;
+ }
+ // Check that interface methods match interface classes.
+ if (klass->IsInterface() && method_type != METHOD_INTERFACE) {
+ Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
+ << " is in an interface class " << PrettyClass(klass);
+ return NULL;
+ } else if (!klass->IsInterface() && method_type == METHOD_INTERFACE) {
+ Fail(VERIFY_ERROR_CLASS_CHANGE) << "interface method " << PrettyMethod(res_method)
+ << " is in a non-interface class " << PrettyClass(klass);
+ return NULL;
+ }
+ // See if the method type implied by the invoke instruction matches the access flags for the
+ // target method.
+ if ((method_type == METHOD_DIRECT && !res_method->IsDirect()) ||
+ (method_type == METHOD_STATIC && !res_method->IsStatic()) ||
+ ((method_type == METHOD_VIRTUAL || method_type == METHOD_INTERFACE) && res_method->IsDirect())
+ ) {
+    Fail(VERIFY_ERROR_CLASS_CHANGE) << "invoke type (" << method_type << ") does not match method"
+                                       " type of " << PrettyMethod(res_method);
+ return NULL;
+ }
+ return res_method;
+}
+
+mirror::AbstractMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
+ MethodType method_type,
+ bool is_range,
+ bool is_super) {
+ // Resolve the method. This could be an abstract or concrete method depending on what sort of call
+ // we're making.
+ const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ mirror::AbstractMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
+ if (res_method == NULL) { // error or class is unresolved
+ return NULL;
+ }
+
+ // If we're using invoke-super(method), make sure that the executing method's class' superclass
+ // has a vtable entry for the target method.
+ if (is_super) {
+ DCHECK(method_type == METHOD_VIRTUAL);
+ const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+ if (super.IsUnresolvedTypes()) {
+ Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
+ << PrettyMethod(dex_method_idx_, *dex_file_)
+ << " to super " << PrettyMethod(res_method);
+ return NULL;
+ }
+ mirror::Class* super_klass = super.GetClass();
+ if (res_method->GetMethodIndex() >= super_klass->GetVTable()->GetLength()) {
+ MethodHelper mh(res_method);
+ Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from "
+ << PrettyMethod(dex_method_idx_, *dex_file_)
+ << " to super " << super
+ << "." << mh.GetName()
+ << mh.GetSignature();
+ return NULL;
+ }
+ }
+ // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
+ // match the call to the signature. Also, we might be calling through an abstract method
+ // definition (which doesn't have register count values).
+ const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
+ /* caught by static verifier */
+ DCHECK(is_range || expected_args <= 5);
+ if (expected_args > code_item_->outs_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
+ << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
+ return NULL;
+ }
+
+ /*
+ * Check the "this" argument, which must be an instance of the class that declared the method.
+ * For an interface class, we don't do the full interface merge (see JoinClass), so we can't do a
+ * rigorous check here (which is okay since we have to do it at runtime).
+ */
+ size_t actual_args = 0;
+ if (!res_method->IsStatic()) {
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
+ return NULL;
+ }
+ if (actual_arg_type.IsUninitializedReference() && !res_method->IsConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
+ return NULL;
+ }
+ if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
+ mirror::Class* klass = res_method->GetDeclaringClass();
+ const RegType& res_method_class =
+ reg_types_.FromClass(ClassHelper(klass).GetDescriptor(), klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
+ << "' not instance of '" << res_method_class << "'";
+ return NULL;
+ }
+ }
+ actual_args++;
+ }
+ /*
+ * Process the target method's signature. This signature may or may not
+ * have been verified, so we can't assume it's properly formed.
+ */
+ MethodHelper mh(res_method);
+ const DexFile::TypeList* params = mh.GetParameterTypeList();
+ size_t params_size = params == NULL ? 0 : params->Size();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
+ for (size_t param_index = 0; param_index < params_size; param_index++) {
+ if (actual_args >= expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" << PrettyMethod(res_method)
+ << "'. Expected " << expected_args << " arguments, processing argument " << actual_args
+ << " (where longs/doubles count twice).";
+ return NULL;
+ }
+ const char* descriptor =
+ mh.GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_);
+ if (descriptor == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
+ << " missing signature component";
+ return NULL;
+ }
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
+ if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+ return res_method;
+ }
+ actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1;
+ }
+ if (actual_args != expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
+ << " expected " << expected_args << " arguments, found " << actual_args;
+ return NULL;
+ } else {
+ return res_method;
+ }
+}
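+
+// Worked example for a hypothetical method: for invoke-virtual {v1, v2, v3, v4} on a
+// method with signature (JLjava/lang/Object;)V, expected_args (vAA) is 4; the walk above
+// consumes one slot for 'this' (v1), two for the long J (v2/v3) and one for the reference
+// (v4), so actual_args reaches 4 and the counts match.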
+
+mirror::AbstractMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst,
+ RegisterLine* reg_line,
+ bool is_range) {
+ DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
+ inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
+ const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+ if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
+ return NULL;
+ }
+ mirror::Class* this_class = NULL;
+ if (!actual_arg_type.IsUnresolvedTypes()) {
+ this_class = actual_arg_type.GetClass();
+ } else {
+ const std::string& descriptor(actual_arg_type.GetDescriptor());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ this_class = class_linker->FindClass(descriptor.c_str(), class_loader_);
+ if (this_class == NULL) {
+ Thread::Current()->ClearException();
+ // Look for a system class
+ this_class = class_linker->FindClass(descriptor.c_str(), NULL);
+ }
+ }
+ if (this_class == NULL) {
+ return NULL;
+ }
+ mirror::ObjectArray<mirror::AbstractMethod>* vtable = this_class->GetVTable();
+ CHECK(vtable != NULL);
+ uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ CHECK(vtable_index < vtable->GetLength());
+ mirror::AbstractMethod* res_method = vtable->Get(vtable_index);
+ CHECK(!Thread::Current()->IsExceptionPending());
+ return res_method;
+}
+
+mirror::AbstractMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
+ bool is_range) {
+ DCHECK(Runtime::Current()->IsStarted());
+ mirror::AbstractMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
+ is_range);
+ if (res_method == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
+ return NULL;
+ }
+ CHECK(!res_method->IsDirect() && !res_method->IsStatic());
+
+ // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
+ // match the call to the signature. Also, we might be calling through an abstract method
+ // definition (which doesn't have register count values).
+ const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
+ return NULL;
+ }
+ const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
+ /* caught by static verifier */
+ DCHECK(is_range || expected_args <= 5);
+ if (expected_args > code_item_->outs_size_) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
+ << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
+ return NULL;
+ }
+
+ /*
+ * Check the "this" argument, which must be an instance of the class that declared the method.
+ * For an interface class, we don't do the full interface merge (see JoinClass), so we can't do a
+ * rigorous check here (which is okay since we have to do it at runtime).
+ */
+ if (actual_arg_type.IsUninitializedReference() && !res_method->IsConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
+ return NULL;
+ }
+ if (!actual_arg_type.IsZero()) {
+ mirror::Class* klass = res_method->GetDeclaringClass();
+ const RegType& res_method_class =
+ reg_types_.FromClass(ClassHelper(klass).GetDescriptor(), klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
+ << "' not instance of '" << res_method_class << "'";
+ return NULL;
+ }
+ }
+ /*
+ * Process the target method's signature. This signature may or may not
+ * have been verified, so we can't assume it's properly formed.
+ */
+ MethodHelper mh(res_method);
+ const DexFile::TypeList* params = mh.GetParameterTypeList();
+ size_t params_size = params == NULL ? 0 : params->Size();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
+ size_t actual_args = 1;
+ for (size_t param_index = 0; param_index < params_size; param_index++) {
+ if (actual_args >= expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" << PrettyMethod(res_method)
+ << "'. Expected " << expected_args << " arguments, processing argument " << actual_args
+ << " (where longs/doubles count twice).";
+ return NULL;
+ }
+ const char* descriptor =
+ mh.GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_);
+ if (descriptor == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
+ << " missing signature component";
+ return NULL;
+ }
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
+ if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+ return res_method;
+ }
+ actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1;
+ }
+ if (actual_args != expected_args) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
+ << " expected " << expected_args << " arguments, found " << actual_args;
+ return NULL;
+ } else {
+ return res_method;
+ }
+}
+
+void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range) {
+ uint32_t type_idx;
+ if (!is_filled) {
+ DCHECK_EQ(inst->Opcode(), Instruction::NEW_ARRAY);
+ type_idx = inst->VRegC_22c();
+ } else if (!is_range) {
+ DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
+ type_idx = inst->VRegB_35c();
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
+ type_idx = inst->VRegB_3rc();
+ }
+ const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ if (res_type.IsConflict()) { // bad class
+ DCHECK_NE(failures_.size(), 0U);
+ } else {
+ // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
+ if (!res_type.IsArrayTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "new-array on non-array class " << res_type;
+ } else if (!is_filled) {
+ /* make sure "size" register is valid type */
+ work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
+ /* set register type to array class */
+ const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
+ } else {
+ // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
+ // the list and fail. It's legal, if silly, for arg_count to be zero.
+ const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_);
+ uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
+ uint32_t arg[5];
+ if (!is_range) {
+ inst->GetArgs(arg);
+ }
+ for (size_t ui = 0; ui < arg_count; ui++) {
+ uint32_t get_reg = is_range ? inst->VRegC_3rc() + ui : arg[ui];
+ if (!work_line_->VerifyRegisterType(get_reg, expected_type)) {
+ work_line_->SetResultRegisterType(reg_types_.Conflict());
+ return;
+ }
+ }
+ // filled-array result goes into "result" register
+ const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ work_line_->SetResultRegisterType(precise_type);
+ }
+ }
+}
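+
+// Illustrative sketch with hypothetical registers: for "new-array v0, v1, [I" the size
+// register v1 must hold an integer type and v0 becomes the precise array type [I; for
+// "filled-new-array {v0, v1, v2}, [I" each of v0..v2 is checked against the component
+// type and the array type lands in the result register, to be consumed by a following
+// move-result-object.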
+
+void MethodVerifier::VerifyAGet(const Instruction* inst,
+ const RegType& insn_type, bool is_primitive) {
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ if (!index_type.IsArrayIndexTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
+ } else {
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ if (array_type.IsZero()) {
+ // Null array class; this code path will fail at runtime. Infer a merge-able type from the
+ // instruction type. TODO: have a proper notion of bottom here.
+ if (!is_primitive || insn_type.IsCategory1Types()) {
+ // Reference or category 1
+ work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Zero());
+ } else {
+ // Category 2
+ work_line_->SetRegisterTypeWide(inst->VRegA_23x(), reg_types_.FromCat2ConstLo(0, false),
+ reg_types_.FromCat2ConstHi(0, false));
+ }
+ } else if (!array_type.IsArrayTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
+ } else {
+ /* verify the class */
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ if (!component_type.IsReferenceTypes() && !is_primitive) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
+ << " source for aget-object";
+ } else if (component_type.IsNonZeroReferenceTypes() && is_primitive) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "reference array type " << array_type
+ << " source for category 1 aget";
+ } else if (is_primitive && !insn_type.Equals(component_type) &&
+ !((insn_type.IsInteger() && component_type.IsFloat()) ||
+ (insn_type.IsLong() && component_type.IsDouble()))) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array type " << array_type
+ << " incompatible with aget of type " << insn_type;
+ } else {
+ // Use knowledge of the field type which is stronger than the type inferred from the
+ // instruction, which can't differentiate object types and ints from floats, longs from
+ // doubles.
+ if (!component_type.IsLowHalf()) {
+ work_line_->SetRegisterType(inst->VRegA_23x(), component_type);
+ } else {
+ work_line_->SetRegisterTypeWide(inst->VRegA_23x(), component_type,
+ component_type.HighHalf(&reg_types_));
+ }
+ }
+ }
+ }
+}
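+
+// Illustrative example of why the component type wins over the instruction type: aget
+// cannot distinguish int from float sources, so for "aget v0, v1, v2" where v1 holds [F,
+// insn_type is Integer but v0 receives the component type Float; the
+// IsInteger()/IsFloat() escape hatch above covers exactly this case (and its long/double
+// analogue for aget-wide).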
+
+void MethodVerifier::VerifyAPut(const Instruction* inst,
+ const RegType& insn_type, bool is_primitive) {
+ const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ if (!index_type.IsArrayIndexTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
+ } else {
+ const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ if (array_type.IsZero()) {
+      // Null array type; this code path will fail at runtime. There is no result register
+      // to update, so nothing more to do here.
+ } else if (!array_type.IsArrayTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
+ } else {
+ /* verify the class */
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
+ if (!component_type.IsReferenceTypes() && !is_primitive) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
+ << " source for aput-object";
+ } else if (component_type.IsNonZeroReferenceTypes() && is_primitive) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "reference array type " << array_type
+ << " source for category 1 aput";
+ } else if (is_primitive && !insn_type.Equals(component_type) &&
+ !((insn_type.IsInteger() && component_type.IsFloat()) ||
+ (insn_type.IsLong() && component_type.IsDouble()))) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array type " << array_type
+ << " incompatible with aput of type " << insn_type;
+ } else {
+ // The instruction agrees with the type of array, confirm the value to be stored does too
+ // Note: we use the instruction type (rather than the component type) for aput-object as
+ // incompatible classes will be caught at runtime as an array store exception
+ work_line_->VerifyRegisterType(inst->VRegA_23x(), is_primitive ? component_type : insn_type);
+ }
+ }
+ }
+}
+
+mirror::Field* MethodVerifier::GetStaticField(int field_idx) {
+ const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ // Check access to class
+ const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ if (klass_type.IsConflict()) { // bad class
+ AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
+ field_idx, dex_file_->GetFieldName(field_id),
+ dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
+ return NULL;
+ }
+ if (klass_type.IsUnresolvedTypes()) {
+ return NULL; // Can't resolve Class so no more to do here, will do checking at runtime.
+ }
+ mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
+ dex_cache_, class_loader_);
+ if (field == NULL) {
+ LOG(INFO) << "Unable to resolve static field " << field_idx << " ("
+ << dex_file_->GetFieldName(field_id) << ") in "
+ << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
+ DCHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ return NULL;
+ } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
+ field->GetAccessFlags())) {
+ Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access static field " << PrettyField(field)
+ << " from " << GetDeclaringClass();
+ return NULL;
+ } else if (!field->IsStatic()) {
+ Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field) << " to be static";
+ return NULL;
+ } else {
+ return field;
+ }
+}
+
+mirror::Field* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+ const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ // Check access to class
+ const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ if (klass_type.IsConflict()) {
+ AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
+ field_idx, dex_file_->GetFieldName(field_id),
+ dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
+ return NULL;
+ }
+ if (klass_type.IsUnresolvedTypes()) {
+ return NULL; // Can't resolve Class so no more to do here
+ }
+ mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
+ dex_cache_, class_loader_);
+ if (field == NULL) {
+ LOG(INFO) << "Unable to resolve instance field " << field_idx << " ("
+ << dex_file_->GetFieldName(field_id) << ") in "
+ << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
+ DCHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ return NULL;
+ } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
+ field->GetAccessFlags())) {
+ Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access instance field " << PrettyField(field)
+ << " from " << GetDeclaringClass();
+ return NULL;
+ } else if (field->IsStatic()) {
+ Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field)
+ << " to not be static";
+ return NULL;
+ } else if (obj_type.IsZero()) {
+    // Cannot infer and check the type; however, the access will cause a null pointer exception.
+ return field;
+ } else {
+ mirror::Class* klass = field->GetDeclaringClass();
+ const RegType& field_klass =
+ reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
+ klass, klass->CannotBeAssignedFromOtherTypes());
+ if (obj_type.IsUninitializedTypes() &&
+ (!IsConstructor() || GetDeclaringClass().Equals(obj_type) ||
+ !field_klass.Equals(GetDeclaringClass()))) {
+ // Field accesses through uninitialized references are only allowable for constructors where
+ // the field is declared in this class
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access instance field " << PrettyField(field)
+ << " of a not fully initialized object within the context of "
+ << PrettyMethod(dex_method_idx_, *dex_file_);
+ return NULL;
+ } else if (!field_klass.IsAssignableFrom(obj_type)) {
+ // Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class
+ // of C1. For resolution to occur the declared class of the field must be compatible with
+ // obj_type, we've discovered this wasn't so, so report the field didn't exist.
+ Fail(VERIFY_ERROR_NO_FIELD) << "cannot access instance field " << PrettyField(field)
+ << " from object of type " << obj_type;
+ return NULL;
+ } else {
+ return field;
+ }
+ }
+}
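+
+// Illustrative example with hypothetical classes: inside A.<init>, an iput to a field
+// declared in A through the still-uninitialized "this" register is accepted, while an
+// iput through that same register to a field declared in A's superclass is rejected,
+// matching the rule that uninitialized receivers may only touch fields of the class
+// under construction.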
+
+void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static) {
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ mirror::Field* field;
+ if (is_static) {
+ field = GetStaticField(field_idx);
+ } else {
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ field = GetInstanceField(object_type, field_idx);
+ }
+ const char* descriptor;
+ mirror::ClassLoader* loader;
+ if (field != NULL) {
+ descriptor = FieldHelper(field).GetTypeDescriptor();
+ loader = field->GetDeclaringClass()->GetClassLoader();
+ } else {
+ const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
+ loader = class_loader_;
+ }
+ const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
+ if (is_primitive) {
+ if (field_type.Equals(insn_type) ||
+ (field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
+ (field_type.IsDouble() && insn_type.IsLongTypes())) {
+ // expected that read is of the correct primitive type or that int reads are reading
+ // floats or long reads are reading doubles
+ } else {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << field_type << "' in get";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in get-object";
+ work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+ return;
+ }
+ }
+ if (!field_type.IsLowHalf()) {
+ work_line_->SetRegisterType(vregA, field_type);
+ } else {
+ work_line_->SetRegisterTypeWide(vregA, field_type, field_type.HighHalf(&reg_types_));
+ }
+}
+
+void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static) {
+ uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+ mirror::Field* field;
+ if (is_static) {
+ field = GetStaticField(field_idx);
+ } else {
+ const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ field = GetInstanceField(object_type, field_idx);
+ }
+ const char* descriptor;
+ mirror::ClassLoader* loader;
+ if (field != NULL) {
+ descriptor = FieldHelper(field).GetTypeDescriptor();
+ loader = field->GetDeclaringClass()->GetClassLoader();
+ } else {
+ const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
+ descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
+ loader = class_loader_;
+ }
+ const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ if (field != NULL) {
+ if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
+ Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
+ << " from other class " << GetDeclaringClass();
+ return;
+ }
+ }
+ const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
+ if (is_primitive) {
+ // Primitive field assignability rules are weaker than regular assignability rules
+ bool instruction_compatible;
+ bool value_compatible;
+ const RegType& value_type = work_line_->GetRegisterType(vregA);
+ if (field_type.IsIntegralTypes()) {
+ instruction_compatible = insn_type.IsIntegralTypes();
+ value_compatible = value_type.IsIntegralTypes();
+ } else if (field_type.IsFloat()) {
+ instruction_compatible = insn_type.IsInteger(); // no [is]put-float, so expect [is]put-int
+ value_compatible = value_type.IsFloatTypes();
+ } else if (field_type.IsLong()) {
+ instruction_compatible = insn_type.IsLong();
+ value_compatible = value_type.IsLongTypes();
+ } else if (field_type.IsDouble()) {
+ instruction_compatible = insn_type.IsLong(); // no [is]put-double, so expect [is]put-long
+ value_compatible = value_type.IsDoubleTypes();
+ } else {
+ instruction_compatible = false; // reference field with primitive store
+ value_compatible = false; // unused
+ }
+ if (!instruction_compatible) {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in put";
+ return;
+ }
+ if (!value_compatible) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
+ << " of type " << value_type
+ << " but expected " << field_type
+ << " for store to " << PrettyField(field) << " in put";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in put-object";
+ return;
+ }
+ work_line_->VerifyRegisterType(vregA, field_type);
+ }
+}
+
+// Look for an instance field with this offset.
+// TODO: if fields were sorted by offset we could speed up the search, e.g. with a binary search.
+static mirror::Field* FindInstanceFieldWithOffset(const mirror::Class* klass,
+ uint32_t field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const mirror::ObjectArray<mirror::Field>* instance_fields = klass->GetIFields();
+ if (instance_fields != NULL) {
+ for (int32_t i = 0, e = instance_fields->GetLength(); i < e; ++i) {
+ mirror::Field* field = instance_fields->Get(i);
+ if (field->GetOffset().Uint32Value() == field_offset) {
+ return field;
+ }
+ }
+ }
+ // We did not find field in class: look into superclass.
+ if (klass->GetSuperClass() != NULL) {
+ return FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset);
+ } else {
+ return NULL;
+ }
+}
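+
+// Illustrative sketch: a quickened "iget-quick v0, v1, offset@0x8" carries a raw byte
+// offset rather than a field index, so recovering the mirror::Field means scanning the
+// receiver class's instance fields (and then each superclass in turn) for the one whose
+// GetOffset() equals 0x8.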
+
+// Returns the field accessed by a quick field access instruction (iget/iput-quick), or NULL
+// if it cannot be found.
+mirror::Field* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
+ RegisterLine* reg_line) {
+ DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
+ inst->Opcode() == Instruction::IGET_WIDE_QUICK ||
+ inst->Opcode() == Instruction::IGET_OBJECT_QUICK ||
+ inst->Opcode() == Instruction::IPUT_QUICK ||
+ inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
+ inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
+ const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+ mirror::Class* object_class = NULL;
+ if (!object_type.IsUnresolvedTypes()) {
+ object_class = object_type.GetClass();
+ } else {
+ // We need to resolve the class from its descriptor.
+ const std::string& descriptor(object_type.GetDescriptor());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ object_class = class_linker->FindClass(descriptor.c_str(), class_loader_);
+ if (object_class == NULL) {
+ Thread::Current()->ClearException();
+ // Look for a system class
+ object_class = class_linker->FindClass(descriptor.c_str(), NULL);
+ }
+ }
+ if (object_class == NULL) {
+ // Failed to get the Class* from reg type.
+ LOG(WARNING) << "Failed to get Class* from " << object_type;
+ return NULL;
+ }
+ uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
+ return FindInstanceFieldWithOffset(object_class, field_offset);
+}
+
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive) {
+ DCHECK(Runtime::Current()->IsStarted());
+ mirror::Field* field = GetQuickFieldAccess(inst, work_line_.get());
+ if (field == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
+ return;
+ }
+ const char* descriptor = FieldHelper(field).GetTypeDescriptor();
+ mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
+ const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ const uint32_t vregA = inst->VRegA_22c();
+ if (is_primitive) {
+ if (field_type.Equals(insn_type) ||
+ (field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
+ (field_type.IsDouble() && insn_type.IsLongTypes())) {
+ // expected that read is of the correct primitive type or that int reads are reading
+ // floats or long reads are reading doubles
+ } else {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << field_type << "' in get";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in get-object";
+ work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+ return;
+ }
+ }
+ if (!field_type.IsLowHalf()) {
+ work_line_->SetRegisterType(vregA, field_type);
+ } else {
+ work_line_->SetRegisterTypeWide(vregA, field_type, field_type.HighHalf(&reg_types_));
+ }
+}
+
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive) {
+ DCHECK(Runtime::Current()->IsStarted());
+ mirror::Field* field = GetQuickFieldAccess(inst, work_line_.get());
+ if (field == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
+ return;
+ }
+ const char* descriptor = FieldHelper(field).GetTypeDescriptor();
+ mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
+ const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ if (field != NULL) {
+ if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
+ Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
+ << " from other class " << GetDeclaringClass();
+ return;
+ }
+ }
+ const uint32_t vregA = inst->VRegA_22c();
+ if (is_primitive) {
+ // Primitive field assignability rules are weaker than regular assignability rules
+ bool instruction_compatible;
+ bool value_compatible;
+ const RegType& value_type = work_line_->GetRegisterType(vregA);
+ if (field_type.IsIntegralTypes()) {
+ instruction_compatible = insn_type.IsIntegralTypes();
+ value_compatible = value_type.IsIntegralTypes();
+ } else if (field_type.IsFloat()) {
+ instruction_compatible = insn_type.IsInteger(); // no [is]put-float, so expect [is]put-int
+ value_compatible = value_type.IsFloatTypes();
+ } else if (field_type.IsLong()) {
+ instruction_compatible = insn_type.IsLong();
+ value_compatible = value_type.IsLongTypes();
+ } else if (field_type.IsDouble()) {
+ instruction_compatible = insn_type.IsLong(); // no [is]put-double, so expect [is]put-long
+ value_compatible = value_type.IsDoubleTypes();
+ } else {
+ instruction_compatible = false; // reference field with primitive store
+ value_compatible = false; // unused
+ }
+ if (!instruction_compatible) {
+ // This is a global failure rather than a class change failure as the instructions and
+ // the descriptors for the type should have been consistent within the same file at
+ // compile time
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
+ << " to be of type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in put";
+ return;
+ }
+ if (!value_compatible) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
+ << " of type " << value_type
+ << " but expected " << field_type
+ << " for store to " << PrettyField(field) << " in put";
+ return;
+ }
+ } else {
+ if (!insn_type.IsAssignableFrom(field_type)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
+ << " to be compatible with type '" << insn_type
+ << "' but found type '" << field_type
+ << "' in put-object";
+ return;
+ }
+ work_line_->VerifyRegisterType(vregA, field_type);
+ }
+}
+
+bool MethodVerifier::CheckNotMoveException(const uint16_t* insns, int insn_idx) {
+ if ((insns[insn_idx] & 0xff) == Instruction::MOVE_EXCEPTION) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid use of move-exception";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line) {
+ bool changed = true;
+ RegisterLine* target_line = reg_table_.GetLine(next_insn);
+ if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+ /*
+ * We haven't processed this instruction before, and we haven't touched the registers here, so
+ * there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
+ * only way a register can transition out of "unknown", so this is not just an optimization.)
+ */
+ target_line->CopyFromLine(merge_line);
+ } else {
+ UniquePtr<RegisterLine> copy(gDebugVerify ? new RegisterLine(target_line->NumRegs(), this) : NULL);
+ if (gDebugVerify) {
+ copy->CopyFromLine(target_line);
+ }
+ changed = target_line->MergeRegisters(merge_line);
+ if (have_pending_hard_failure_) {
+ return false;
+ }
+ if (gDebugVerify && changed) {
+ LogVerifyInfo() << "Merging at [" << reinterpret_cast<void*>(work_insn_idx_) << "]"
+ << " to [" << reinterpret_cast<void*>(next_insn) << "]: " << "\n"
+ << *copy.get() << " MERGE\n"
+ << *merge_line << " ==\n"
+ << *target_line << "\n";
+ }
+ }
+ if (changed) {
+ insn_flags_[next_insn].SetChanged();
+ }
+ return true;
+}
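+
+// Illustrative merge with hypothetical types: if the fall-through into next_insn left v0
+// as Ljava/lang/String; and a branch into the same pc left it as Ljava/lang/Integer;,
+// MergeRegisters() joins the two lines so v0 becomes Ljava/lang/Object;, and the
+// "changed" flag forces next_insn to be re-verified against the widened line.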
+
+InstructionFlags* MethodVerifier::CurrentInsnFlags() {
+ return &insn_flags_[work_insn_idx_];
+}
+
+const RegType& MethodVerifier::GetMethodReturnType() {
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
+ uint16_t return_type_idx = proto_id.return_type_idx_;
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
+ return reg_types_.FromDescriptor(class_loader_, descriptor, false);
+}
+
+const RegType& MethodVerifier::GetDeclaringClass() {
+ if (declaring_class_ == NULL) {
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
+ if (mirror_method_ != NULL) {
+ mirror::Class* klass = mirror_method_->GetDeclaringClass();
+ declaring_class_ = &reg_types_.FromClass(descriptor, klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ } else {
+ declaring_class_ = &reg_types_.FromDescriptor(class_loader_, descriptor, false);
+ }
+ }
+ return *declaring_class_;
+}
+
+void MethodVerifier::ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits,
+ size_t* log2_max_gc_pc) {
+ size_t local_gc_points = 0;
+ size_t max_insn = 0;
+ size_t max_ref_reg = -1;
+ for (size_t i = 0; i < code_item_->insns_size_in_code_units_; i++) {
+ if (insn_flags_[i].IsCompileTimeInfoPoint()) {
+ local_gc_points++;
+ max_insn = i;
+ RegisterLine* line = reg_table_.GetLine(i);
+ max_ref_reg = line->GetMaxNonZeroReferenceReg(max_ref_reg);
+ }
+ }
+ *gc_points = local_gc_points;
+  *ref_bitmap_bits = max_ref_reg + 1;  // if max register is 0 we need 1 bit to encode (i.e. +1)
+ size_t i = 0;
+ while ((1U << i) <= max_insn) {
+ i++;
+ }
+ *log2_max_gc_pc = i;
+}
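+
+// Worked example for a hypothetical method: GC points at pcs {3, 20, 100} with the
+// highest reference held in v6 give local_gc_points = 3 and ref_bitmap_bits = 7, and the
+// loop finds the smallest i with (1 << i) > 100, i.e. *log2_max_gc_pc = 7
+// (64 <= 100 < 128). Note that max_ref_reg starts at size_t(-1), so a method holding no
+// references wraps back to ref_bitmap_bits == 0.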
+
+MethodVerifier::MethodSafeCastSet* MethodVerifier::GenerateSafeCastSet() {
+  /*
+   * Walk over the method's code and collect the dex pcs of check-cast instructions whose
+   * cast is statically known to succeed; code generation uses this set to elide those casts.
+   */
+ if (!failure_messages_.empty()) {
+ return NULL;
+ }
+ UniquePtr<MethodSafeCastSet> mscs;
+ const Instruction* inst = Instruction::At(code_item_->insns_);
+ const Instruction* end = Instruction::At(code_item_->insns_ +
+ code_item_->insns_size_in_code_units_);
+
+ for (; inst < end; inst = inst->Next()) {
+ if (Instruction::CHECK_CAST != inst->Opcode()) {
+ continue;
+ }
+ uint32_t dex_pc = inst->GetDexPc(code_item_->insns_);
+ RegisterLine* line = reg_table_.GetLine(dex_pc);
+ const RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+ const RegType& cast_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ if (cast_type.IsStrictlyAssignableFrom(reg_type)) {
+ if (mscs.get() == NULL) {
+ mscs.reset(new MethodSafeCastSet());
+ }
+ mscs->insert(dex_pc);
+ }
+ }
+ return mscs.release();
+}
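+
+// Illustrative safe cast with hypothetical types: if flow analysis already proved that v0
+// holds Ljava/lang/String;, a "check-cast v0, Ljava/lang/Object;" at that pc can never
+// throw, so its dex pc enters the set and the backend may elide the runtime type check.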
+
+MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
+  // It is risky to rely on reg_types for sharpening in cases of soft
+  // verification failures: we might end up sharpening to a wrong implementation. Just abort.
+ if (!failure_messages_.empty()) {
+ return NULL;
+ }
+
+ UniquePtr<PcToConcreteMethodMap> pc_to_concrete_method_map;
+  const uint16_t* insns = code_item_->insns_;
+ const Instruction* inst = Instruction::At(insns);
+ const Instruction* end = Instruction::At(insns + code_item_->insns_size_in_code_units_);
+
+ for (; inst < end; inst = inst->Next()) {
+ bool is_virtual = (inst->Opcode() == Instruction::INVOKE_VIRTUAL) ||
+ (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE);
+ bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
+ (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+
+    if (!is_interface && !is_virtual) {
+ continue;
+ }
+ // Get reg type for register holding the reference to the object that will be dispatched upon.
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ RegisterLine* line = reg_table_.GetLine(dex_pc);
+ bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
+ (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ const RegType&
+ reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
+
+ if (!reg_type.HasClass()) {
+ // We will compute devirtualization information only when we know the Class of the reg type.
+ continue;
+ }
+ mirror::Class* reg_class = reg_type.GetClass();
+ if (reg_class->IsInterface()) {
+ // We can't devirtualize when the known type of the register is an interface.
+ continue;
+ }
+ if (reg_class->IsAbstract() && !reg_class->IsArrayClass()) {
+ // We can't devirtualize abstract classes except on arrays of abstract classes.
+ continue;
+ }
+ mirror::AbstractMethod* abstract_method =
+ dex_cache_->GetResolvedMethod(is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
+    if (abstract_method == NULL) {
+ // If the method is not found in the cache this means that it was never found
+ // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
+ continue;
+ }
+ // Find the concrete method.
+ mirror::AbstractMethod* concrete_method = NULL;
+ if (is_interface) {
+ concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(abstract_method);
+ }
+ if (is_virtual) {
+ concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(abstract_method);
+ }
+ if (concrete_method == NULL || concrete_method->IsAbstract()) {
+ // In cases where concrete_method is not found, or is abstract, continue to the next invoke.
+ continue;
+ }
+ if (reg_type.IsPreciseReference() || concrete_method->IsFinal() ||
+ concrete_method->GetDeclaringClass()->IsFinal()) {
+ // If we knew exactly the class being dispatched upon, or if the target method cannot be
+ // overridden record the target to be used in the compiler driver.
+ if (pc_to_concrete_method_map.get() == NULL) {
+ pc_to_concrete_method_map.reset(new PcToConcreteMethodMap());
+ }
+ MethodReference concrete_ref(
+ concrete_method->GetDeclaringClass()->GetDexCache()->GetDexFile(),
+ concrete_method->GetDexMethodIndex());
+ pc_to_concrete_method_map->Put(dex_pc, concrete_ref);
+ }
+ }
+ return pc_to_concrete_method_map.release();
+}
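+
+// Illustrative devirtualization with hypothetical types: for an invoke-virtual of
+// Object.toString() on a register whose type is the precise reference
+// Ljava/lang/String;, FindVirtualMethodForVirtual() lands on String.toString(); since the
+// receiver type is exact, the pc is mapped to that concrete method and the compiler may
+// emit a direct call.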
+
+const std::vector<uint8_t>* MethodVerifier::GenerateGcMap() {
+ size_t num_entries, ref_bitmap_bits, pc_bits;
+ ComputeGcMapSizes(&num_entries, &ref_bitmap_bits, &pc_bits);
+  // The size of each bitmap is encoded in 13 bits: one byte plus the spare bits packed
+  // next to the format in the header byte.
+  if (ref_bitmap_bits >= (8 /* bits per byte */ * 8192 /* 13-bit size */)) {
+ // TODO: either a better GC map format or per method failures
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with "
+ << ref_bitmap_bits << " registers";
+ return NULL;
+ }
+ size_t ref_bitmap_bytes = (ref_bitmap_bits + 7) / 8;
+ // There are 2 bytes to encode the number of entries
+ if (num_entries >= 65536) {
+ // TODO: either a better GC map format or per method failures
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with "
+ << num_entries << " entries";
+ return NULL;
+ }
+ size_t pc_bytes;
+ RegisterMapFormat format;
+ if (pc_bits <= 8) {
+ format = kRegMapFormatCompact8;
+ pc_bytes = 1;
+ } else if (pc_bits <= 16) {
+ format = kRegMapFormatCompact16;
+ pc_bytes = 2;
+ } else {
+ // TODO: either a better GC map format or per method failures
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with "
+ << (1 << pc_bits) << " instructions (number is rounded up to nearest power of 2)";
+ return NULL;
+ }
+ size_t table_size = ((pc_bytes + ref_bitmap_bytes) * num_entries) + 4;
+ std::vector<uint8_t>* table = new std::vector<uint8_t>;
+ if (table == NULL) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Failed to encode GC map (size=" << table_size << ")";
+ return NULL;
+ }
+ table->reserve(table_size);
+ // Write table header
+ table->push_back(format | ((ref_bitmap_bytes >> DexPcToReferenceMap::kRegMapFormatShift) &
+ ~DexPcToReferenceMap::kRegMapFormatMask));
+ table->push_back(ref_bitmap_bytes & 0xFF);
+ table->push_back(num_entries & 0xFF);
+ table->push_back((num_entries >> 8) & 0xFF);
+ // Write table data
+ for (size_t i = 0; i < code_item_->insns_size_in_code_units_; i++) {
+ if (insn_flags_[i].IsCompileTimeInfoPoint()) {
+ table->push_back(i & 0xFF);
+ if (pc_bytes == 2) {
+ table->push_back((i >> 8) & 0xFF);
+ }
+ RegisterLine* line = reg_table_.GetLine(i);
+ line->WriteReferenceBitMap(*table, ref_bitmap_bytes);
+ }
+ }
+ DCHECK_EQ(table->size(), table_size);
+ return table;
+}
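+
+// Worked size example for a hypothetical method: 3 GC points, references up to v9 and pcs
+// fitting in 8 bits give ref_bitmap_bytes = (10 + 7) / 8 = 2, pc_bytes = 1 and
+// table_size = (1 + 2) * 3 + 4 = 13 bytes: a 4-byte header (format plus bitmap-size high
+// bits, bitmap-size low byte, entry count low/high) followed by three (pc, bitmap) records.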
+
+void MethodVerifier::VerifyGcMap(const std::vector<uint8_t>& data) {
+  // Check that every GC point has a map entry, that there are no entries for non-GC points,
+  // that the table data is well formed, and that each reference register is marked (and each
+  // non-reference register unmarked) in the bitmap.
+ DexPcToReferenceMap map(&data[0], data.size());
+ size_t map_index = 0;
+ for (size_t i = 0; i < code_item_->insns_size_in_code_units_; i++) {
+ const uint8_t* reg_bitmap = map.FindBitMap(i, false);
+ if (insn_flags_[i].IsCompileTimeInfoPoint()) {
+ CHECK_LT(map_index, map.NumEntries());
+ CHECK_EQ(map.GetDexPc(map_index), i);
+ CHECK_EQ(map.GetBitMap(map_index), reg_bitmap);
+ map_index++;
+ RegisterLine* line = reg_table_.GetLine(i);
+ for (size_t j = 0; j < code_item_->registers_size_; j++) {
+ if (line->GetRegisterType(j).IsNonZeroReferenceTypes()) {
+ CHECK_LT(j / 8, map.RegWidth());
+ CHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1);
+ } else if ((j / 8) < map.RegWidth()) {
+ CHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 0);
+ } else {
+ // If a register doesn't contain a reference then the bitmap may be shorter than the line
+ }
+ }
+ } else {
+ CHECK(reg_bitmap == NULL);
+ }
+ }
+}
+
+void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector<uint8_t>& gc_map) {
+ {
+ WriterMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
+ DexGcMapTable::iterator it = dex_gc_maps_->find(ref);
+ if (it != dex_gc_maps_->end()) {
+ delete it->second;
+ dex_gc_maps_->erase(it);
+ }
+ dex_gc_maps_->Put(ref, &gc_map);
+ }
+ DCHECK(GetDexGcMap(ref) != NULL);
+}
+
+void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) {
+ MutexLock mu(Thread::Current(), *safecast_map_lock_);
+ SafeCastMap::iterator it = safecast_map_->find(ref);
+ if (it != safecast_map_->end()) {
+ delete it->second;
+ safecast_map_->erase(it);
+ }
+
+ safecast_map_->Put(ref, cast_set);
+ CHECK(safecast_map_->find(ref) != safecast_map_->end());
+}
+
+bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) {
+ MutexLock mu(Thread::Current(), *safecast_map_lock_);
+ SafeCastMap::const_iterator it = safecast_map_->find(ref);
+ if (it == safecast_map_->end()) {
+ return false;
+ }
+
+ // Look up the cast address in the set of safe casts
+ MethodVerifier::MethodSafeCastSet::const_iterator cast_it = it->second->find(pc);
+ return cast_it != it->second->end();
+}
+
+const std::vector<uint8_t>* MethodVerifier::GetDexGcMap(MethodReference ref) {
+ ReaderMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
+ DexGcMapTable::const_iterator it = dex_gc_maps_->find(ref);
+ if (it == dex_gc_maps_->end()) {
+ LOG(WARNING) << "Didn't find GC map for: " << PrettyMethod(ref.dex_method_index, *ref.dex_file);
+ return NULL;
+ }
+ CHECK(it->second != NULL);
+ return it->second;
+}
+
+void MethodVerifier::SetDevirtMap(MethodReference ref,
+ const PcToConcreteMethodMap* devirt_map) {
+ WriterMutexLock mu(Thread::Current(), *devirt_maps_lock_);
+ DevirtualizationMapTable::iterator it = devirt_maps_->find(ref);
+ if (it != devirt_maps_->end()) {
+ delete it->second;
+ devirt_maps_->erase(it);
+ }
+
+ devirt_maps_->Put(ref, devirt_map);
+ CHECK(devirt_maps_->find(ref) != devirt_maps_->end());
+}
+
+const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref,
+ uint32_t dex_pc) {
+ ReaderMutexLock mu(Thread::Current(), *devirt_maps_lock_);
+ DevirtualizationMapTable::const_iterator it = devirt_maps_->find(ref);
+ if (it == devirt_maps_->end()) {
+ return NULL;
+ }
+
+ // Look up the PC in the map, get the concrete method to execute and return its reference.
+ MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc);
+  if (pc_to_concrete_method != it->second->end()) {
+ return &(pc_to_concrete_method->second);
+ } else {
+ return NULL;
+ }
+}
+
+std::vector<int32_t> MethodVerifier::DescribeVRegs(uint32_t dex_pc) {
+ RegisterLine* line = reg_table_.GetLine(dex_pc);
+ std::vector<int32_t> result;
+ for (size_t i = 0; i < line->NumRegs(); ++i) {
+ const RegType& type = line->GetRegisterType(i);
+ if (type.IsConstant()) {
+ result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
+ result.push_back(type.ConstantValue());
+ } else if (type.IsConstantLo()) {
+ result.push_back(type.IsPreciseConstantLo() ? kConstant : kImpreciseConstant);
+ result.push_back(type.ConstantValueLo());
+ } else if (type.IsConstantHi()) {
+ result.push_back(type.IsPreciseConstantHi() ? kConstant : kImpreciseConstant);
+ result.push_back(type.ConstantValueHi());
+ } else if (type.IsIntegralTypes()) {
+ result.push_back(kIntVReg);
+ result.push_back(0);
+ } else if (type.IsFloat()) {
+ result.push_back(kFloatVReg);
+ result.push_back(0);
+ } else if (type.IsLong()) {
+ result.push_back(kLongLoVReg);
+ result.push_back(0);
+ result.push_back(kLongHiVReg);
+ result.push_back(0);
+ ++i;
+ } else if (type.IsDouble()) {
+ result.push_back(kDoubleLoVReg);
+ result.push_back(0);
+ result.push_back(kDoubleHiVReg);
+ result.push_back(0);
+ ++i;
+ } else if (type.IsUndefined() || type.IsConflict() || type.IsHighHalf()) {
+ result.push_back(kUndefined);
+ result.push_back(0);
+ } else {
+ CHECK(type.IsNonZeroReferenceTypes());
+ result.push_back(kReferenceVReg);
+ result.push_back(0);
+ }
+ }
+ return result;
+}
+
+ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
+MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL;
+
+Mutex* MethodVerifier::safecast_map_lock_ = NULL;
+MethodVerifier::SafeCastMap* MethodVerifier::safecast_map_ = NULL;
+
+ReaderWriterMutex* MethodVerifier::devirt_maps_lock_ = NULL;
+MethodVerifier::DevirtualizationMapTable* MethodVerifier::devirt_maps_ = NULL;
+
+Mutex* MethodVerifier::rejected_classes_lock_ = NULL;
+MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL;
+
+void MethodVerifier::Init() {
+ dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock");
+ Thread* self = Thread::Current();
+ {
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
+ dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
+ }
+
+ safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
+ {
+ MutexLock mu(self, *safecast_map_lock_);
+ safecast_map_ = new MethodVerifier::SafeCastMap();
+ }
+
+ devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock");
+
+ {
+ WriterMutexLock mu(self, *devirt_maps_lock_);
+ devirt_maps_ = new MethodVerifier::DevirtualizationMapTable();
+ }
+
+ rejected_classes_lock_ = new Mutex("verifier rejected classes lock");
+ {
+ MutexLock mu(self, *rejected_classes_lock_);
+ rejected_classes_ = new MethodVerifier::RejectedClassesTable;
+ }
+ art::verifier::RegTypeCache::Init();
+}
+
+void MethodVerifier::Shutdown() {
+ Thread* self = Thread::Current();
+ {
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
+ STLDeleteValues(dex_gc_maps_);
+ delete dex_gc_maps_;
+ dex_gc_maps_ = NULL;
+ }
+ delete dex_gc_maps_lock_;
+ dex_gc_maps_lock_ = NULL;
+
+ {
+ WriterMutexLock mu(self, *devirt_maps_lock_);
+ STLDeleteValues(devirt_maps_);
+ delete devirt_maps_;
+ devirt_maps_ = NULL;
+ }
+ delete devirt_maps_lock_;
+ devirt_maps_lock_ = NULL;
+
+ {
+ MutexLock mu(self, *rejected_classes_lock_);
+ delete rejected_classes_;
+ rejected_classes_ = NULL;
+ }
+ delete rejected_classes_lock_;
+ rejected_classes_lock_ = NULL;
+ verifier::RegTypeCache::ShutDown();
+}
+
+void MethodVerifier::AddRejectedClass(ClassReference ref) {
+ {
+ MutexLock mu(Thread::Current(), *rejected_classes_lock_);
+ rejected_classes_->insert(ref);
+ }
+ CHECK(IsClassRejected(ref));
+}
+
+bool MethodVerifier::IsClassRejected(ClassReference ref) {
+ MutexLock mu(Thread::Current(), *rejected_classes_lock_);
+ return (rejected_classes_->find(ref) != rejected_classes_->end());
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
new file mode 100644
index 0000000..ac0de9e
--- /dev/null
+++ b/runtime/verifier/method_verifier.h
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_METHOD_VERIFIER_H_
+#define ART_SRC_VERIFIER_METHOD_VERIFIER_H_
+
+#include <set>
+#include <vector>
+
+#include "base/casts.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "class_reference.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "instruction_flags.h"
+#include "method_reference.h"
+#include "mirror/object.h"
+#include "reg_type.h"
+#include "reg_type_cache-inl.h"
+#include "register_line.h"
+#include "safe_map.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+struct ReferenceMap2Visitor;
+
+namespace verifier {
+
+class MethodVerifier;
+class DexPcToReferenceMap;
+
+/*
+ * "Direct" and "virtual" methods are stored independently. The type of call used to invoke the
+ * method determines which list we search, and whether we travel up into superclasses.
+ *
+ * (<clinit>, <init>, and methods declared "private" or "static" are stored in the "direct" list.
+ * All others are stored in the "virtual" list.)
+ */
+enum MethodType {
+ METHOD_UNKNOWN = 0,
+ METHOD_DIRECT, // <init>, private
+ METHOD_STATIC, // static
+ METHOD_VIRTUAL, // virtual, super
+ METHOD_INTERFACE // interface
+};
+std::ostream& operator<<(std::ostream& os, const MethodType& rhs);
+
+/*
+ * An enumeration of problems that can turn up during verification.
+ * Both VERIFY_ERROR_BAD_CLASS_SOFT and VERIFY_ERROR_BAD_CLASS_HARD denote failures that cause
+ * the entire class to be rejected. However, VERIFY_ERROR_BAD_CLASS_SOFT denotes a soft failure
+ * that can potentially be corrected, and the verifier will try again at runtime.
+ * VERIFY_ERROR_BAD_CLASS_HARD denotes a hard failure that can't be corrected, and will cause
+ * the class to remain uncompiled. Other errors denote verification errors that cause bytecode
+ * to be rewritten to fail at runtime.
+ */
+enum VerifyError {
+ VERIFY_ERROR_BAD_CLASS_HARD, // VerifyError; hard error that skips compilation.
+ VERIFY_ERROR_BAD_CLASS_SOFT, // VerifyError; soft error that verifies again at runtime.
+
+ VERIFY_ERROR_NO_CLASS, // NoClassDefFoundError.
+ VERIFY_ERROR_NO_FIELD, // NoSuchFieldError.
+ VERIFY_ERROR_NO_METHOD, // NoSuchMethodError.
+ VERIFY_ERROR_ACCESS_CLASS, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_FIELD, // IllegalAccessError.
+ VERIFY_ERROR_ACCESS_METHOD, // IllegalAccessError.
+ VERIFY_ERROR_CLASS_CHANGE, // IncompatibleClassChangeError.
+ VERIFY_ERROR_INSTANTIATION, // InstantiationError.
+};
+std::ostream& operator<<(std::ostream& os, const VerifyError& rhs);
+
+/*
+ * Identifies the type of reference in the instruction that generated the verify error
+ * (e.g. VERIFY_ERROR_ACCESS_CLASS could come from a method, field, or class reference).
+ *
+ * This must fit in two bits.
+ */
+enum VerifyErrorRefType {
+ VERIFY_ERROR_REF_CLASS = 0,
+ VERIFY_ERROR_REF_FIELD = 1,
+ VERIFY_ERROR_REF_METHOD = 2,
+};
+const int kVerifyErrorRefTypeShift = 6;
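+
+// A minimal sketch of the packing implied by the shift above (the helper below is hypothetical,
+// for illustration only): the error value occupies the low six bits and the two-bit reference
+// type is stored above it.
+//
+//   inline uint16_t EncodeVerifyErrorAndRefType(VerifyError error, VerifyErrorRefType ref_type) {
+//     return static_cast<uint16_t>(error) |
+//            (static_cast<uint16_t>(ref_type) << kVerifyErrorRefTypeShift);
+//   }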
+
+// We don't need to store the register data for many instructions, because we only need it at
+// branch points (for verification) or at GC points and branches (for verification plus
+// type-precise register analysis).
+enum RegisterTrackingMode {
+ kTrackRegsBranches,
+ kTrackCompilerInterestPoints,
+ kTrackRegsAll,
+};
+
+// A mapping from a dex pc to the register line status immediately prior to the execution of
+// the instruction at that pc.
+class PcToRegisterLineTable {
+ public:
+ PcToRegisterLineTable() {}
+ ~PcToRegisterLineTable() {
+ STLDeleteValues(&pc_to_register_line_);
+ }
+
+ // Initialize the RegisterTable. Every instruction address can have a different set of information
+ // about what's in which register, but for verification purposes we only need to store it at
+  // branch target addresses (because we merge into those).
+ void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
+ uint16_t registers_size, MethodVerifier* verifier);
+
+ RegisterLine* GetLine(size_t idx) {
+ Table::iterator result = pc_to_register_line_.find(idx); // TODO: C++0x auto
+ if (result == pc_to_register_line_.end()) {
+ return NULL;
+ } else {
+ return result->second;
+ }
+ }
+
+ private:
+ typedef SafeMap<int32_t, RegisterLine*> Table;
+ Table pc_to_register_line_;
+};
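+
+// A minimal usage sketch (the argument values are hypothetical): after Init(), lines exist only
+// for the dex pcs selected by the tracking mode, so GetLine() returning NULL is the expected
+// result for an untracked address.
+//
+//   PcToRegisterLineTable table;
+//   table.Init(kTrackRegsBranches, insn_flags, insns_size, registers_size, verifier);
+//   RegisterLine* line = table.GetLine(branch_target_dex_pc);  // NULL unless tracked.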
+
+// The verifier
+class MethodVerifier {
+ public:
+ enum FailureKind {
+ kNoFailure,
+ kSoftFailure,
+ kHardFailure,
+ };
+
+ /* Verify a class. Returns "kNoFailure" on success. */
+ static FailureKind VerifyClass(const mirror::Class* klass, std::string& error,
+ bool allow_soft_failures)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static FailureKind VerifyClass(const DexFile* dex_file, mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader, uint32_t class_def_idx,
+ std::string& error, bool allow_soft_failures)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
+ mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader,
+ uint32_t class_def_idx, const DexFile::CodeItem* code_item,
+ mirror::AbstractMethod* method, uint32_t method_access_flags)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint8_t EncodePcToReferenceMapData() const;
+
+ uint32_t DexFileVersion() const {
+ return dex_file_->GetVersion();
+ }
+
+ RegTypeCache* GetRegTypeCache() {
+ return &reg_types_;
+ }
+
+ // Log a verification failure.
+ std::ostream& Fail(VerifyError error);
+
+ // Log for verification information.
+ std::ostream& LogVerifyInfo() {
+ return info_messages_ << "VFY: " << PrettyMethod(dex_method_idx_, *dex_file_)
+ << '[' << reinterpret_cast<void*>(work_insn_idx_) << "] : ";
+ }
+
+ // Dump the failures encountered by the verifier.
+ std::ostream& DumpFailures(std::ostream& os);
+
+ // Dump the state of the verifier, namely each instruction, what flags are set on it, register
+  // information.
+ void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static const std::vector<uint8_t>* GetDexGcMap(MethodReference ref)
+ LOCKS_EXCLUDED(dex_gc_maps_lock_);
+
+ static const MethodReference* GetDevirtMap(const MethodReference& ref, uint32_t dex_pc)
+ LOCKS_EXCLUDED(devirt_maps_lock_);
+
+ // Returns true if the cast can statically be verified to be redundant
+ // by using the check-cast elision peephole optimization in the verifier
+ static bool IsSafeCast(MethodReference ref, uint32_t pc) LOCKS_EXCLUDED(safecast_map_lock_);
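+  // A sketch of the intended use of IsSafeCast by a hypothetical caller (e.g. a compiler
+  // lowering a check-cast):
+  //   if (MethodVerifier::IsSafeCast(method_ref, dex_pc)) {
+  //     // The check-cast at dex_pc is statically redundant and may be elided.
+  //   }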
+
+ // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
+ // to the locks held at 'dex_pc' in method 'm'.
+ static void FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc,
+ std::vector<uint32_t>& monitor_enter_dex_pcs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Returns the accessed field corresponding to the quick instruction's field
+ // offset at 'dex_pc' in method 'm'.
+ static mirror::Field* FindAccessedFieldAtDexPc(mirror::AbstractMethod* m,
+ uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Returns the invoked method corresponding to the quick instruction's vtable
+ // index at 'dex_pc' in method 'm'.
+ static mirror::AbstractMethod* FindInvokedMethodAtDexPc(mirror::AbstractMethod* m,
+ uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void Shutdown();
+
+ static bool IsClassRejected(ClassReference ref)
+ LOCKS_EXCLUDED(rejected_classes_lock_);
+
+ bool CanLoadClasses() const {
+ return can_load_classes_;
+ }
+
+ MethodVerifier(const DexFile* dex_file, mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader, uint32_t class_def_idx,
+ const DexFile::CodeItem* code_item,
+ uint32_t method_idx, mirror::AbstractMethod* method,
+ uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Run verification on the method. Returns true if verification completes and false if the input
+ // has an irrecoverable corruption.
+ bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Describe VRegs at the given dex pc.
+ std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
+
+ private:
+ // Adds the given string to the beginning of the last failure message.
+ void PrependToLastFailMessage(std::string);
+
+ // Adds the given string to the end of the last failure message.
+ void AppendToLastFailMessage(std::string);
+
+ /*
+ * Perform verification on a single method.
+ *
+ * We do this in three passes:
+ * (1) Walk through all code units, determining instruction locations,
+ * widths, and other characteristics.
+ * (2) Walk through all code units, performing static checks on
+ * operands.
+ * (3) Iterate through the method, checking type safety and looking
+ * for code flow problems.
+ */
+ static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
+ mirror::DexCache* dex_cache,
+ mirror::ClassLoader* class_loader, uint32_t class_def_idx,
+ const DexFile::CodeItem* code_item,
+ mirror::AbstractMethod* method, uint32_t method_access_flags,
+ bool allow_soft_failures)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Field* FindAccessedFieldAtDexPc(uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::AbstractMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Compute the width of the instruction at each address in the instruction stream, and store it in
+ * insn_flags_. Addresses that are in the middle of an instruction, or that are part of switch
+ * table data, are not touched (so the caller should probably initialize "insn_flags" to zero).
+ *
+ * The "new_instance_count_" and "monitor_enter_count_" fields in vdata are also set.
+ *
+ * Performs some static checks, notably:
+ * - opcode of first instruction begins at index 0
+ * - only documented instructions may appear
+ * - each instruction follows the last
+ * - last byte of last instruction is at (code_length-1)
+ *
+ * Logs an error and returns "false" on failure.
+ */
+ bool ComputeWidthsAndCountOps();
+
+ /*
+ * Set the "in try" flags for all instructions protected by "try" statements. Also sets the
+ * "branch target" flags for exception handlers.
+ *
+ * Call this after widths have been set in "insn_flags".
+ *
+ * Returns "false" if something in the exception table looks fishy, but we're expecting the
+ * exception table to be somewhat sane.
+ */
+ bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Perform static verification on all instructions in a method.
+ *
+ * Walks through instructions in a method calling VerifyInstruction on each.
+ */
+ bool VerifyInstructions();
+
+ /*
+ * Perform static verification on an instruction.
+ *
+ * As a side effect, this sets the "branch target" flags in InsnFlags.
+ *
+ * "(CF)" items are handled during code-flow analysis.
+ *
+ * v3 4.10.1
+ * - target of each jump and branch instruction must be valid
+ * - targets of switch statements must be valid
+ * - operands referencing constant pool entries must be valid
+ * - (CF) operands of getfield, putfield, getstatic, putstatic must be valid
+ * - (CF) operands of method invocation instructions must be valid
+ * - (CF) only invoke-direct can call a method starting with '<'
+ * - (CF) <clinit> must never be called explicitly
+ * - operands of instanceof, checkcast, new (and variants) must be valid
+ * - new-array[-type] limited to 255 dimensions
+ * - can't use "new" on an array class
+ * - (?) limit dimensions in multi-array creation
+ * - local variable load/store register values must be in valid range
+ *
+ * v3 4.11.1.2
+ * - branches must be within the bounds of the code array
+ * - targets of all control-flow instructions are the start of an instruction
+ * - register accesses fall within range of allocated registers
+ * - (N/A) access to constant pool must be of appropriate type
+ * - code does not end in the middle of an instruction
+ * - execution cannot fall off the end of the code
+ * - (earlier) for each exception handler, the "try" area must begin and
+ * end at the start of an instruction (end can be at the end of the code)
+ * - (earlier) for each exception handler, the handler must start at a valid
+ * instruction
+ */
+ bool VerifyInstruction(const Instruction* inst, uint32_t code_offset);
+
+ /* Ensure that the register index is valid for this code item. */
+ bool CheckRegisterIndex(uint32_t idx);
+
+ /* Ensure that the wide register index is valid for this code item. */
+ bool CheckWideRegisterIndex(uint32_t idx);
+
+ // Perform static checks on a field get or set instruction. All we do here is ensure that the
+ // field index is in the valid range.
+ bool CheckFieldIndex(uint32_t idx);
+
+ // Perform static checks on a method invocation instruction. All we do here is ensure that the
+ // method index is in the valid range.
+ bool CheckMethodIndex(uint32_t idx);
+
+ // Perform static checks on a "new-instance" instruction. Specifically, make sure the class
+ // reference isn't for an array class.
+ bool CheckNewInstance(uint32_t idx);
+
+ /* Ensure that the string index is in the valid range. */
+ bool CheckStringIndex(uint32_t idx);
+
+ // Perform static checks on an instruction that takes a class constant. Ensure that the class
+ // index is in the valid range.
+ bool CheckTypeIndex(uint32_t idx);
+
+ // Perform static checks on a "new-array" instruction. Specifically, make sure they aren't
+ // creating an array of arrays that causes the number of dimensions to exceed 255.
+ bool CheckNewArray(uint32_t idx);
+
+ // Verify an array data table. "cur_offset" is the offset of the fill-array-data instruction.
+ bool CheckArrayData(uint32_t cur_offset);
+
+ // Verify that the target of a branch instruction is valid. We don't expect code to jump directly
+ // into an exception handler, but it's valid to do so as long as the target isn't a
+ // "move-exception" instruction. We verify that in a later stage.
+ // The dex format forbids certain instructions from branching to themselves.
+ // Updates "insn_flags_", setting the "branch target" flag.
+ bool CheckBranchTarget(uint32_t cur_offset);
+
+ // Verify a switch table. "cur_offset" is the offset of the switch instruction.
+ // Updates "insn_flags_", setting the "branch target" flag.
+ bool CheckSwitchTargets(uint32_t cur_offset);
+
+ // Check the register indices used in a "vararg" instruction, such as invoke-virtual or
+ // filled-new-array.
+ // - vA holds word count (0-5), args[] have values.
+ // There are some tests we don't do here, e.g. we don't try to verify that invoking a method that
+ // takes a double is done with consecutive registers. This requires parsing the target method
+ // signature, which we will be doing later on during the code flow analysis.
+ bool CheckVarArgRegs(uint32_t vA, uint32_t arg[]);
+
+ // Check the register indices used in a "vararg/range" instruction, such as invoke-virtual/range
+ // or filled-new-array/range.
+ // - vA holds word count, vC holds index of first reg.
+ bool CheckVarArgRangeRegs(uint32_t vA, uint32_t vC);
+
+ // Extract the relative offset from a branch instruction.
+ // Returns "false" on failure (e.g. this isn't a branch instruction).
+ bool GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool* pConditional,
+ bool* selfOkay);
+
+ /* Perform detailed code-flow analysis on a single method. */
+ bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Set the register types for the first instruction in the method based on the method signature.
+ // This has the side-effect of validating the signature.
+ bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Perform code flow on a method.
+ *
+ * The basic strategy is as outlined in v3 4.11.1.2: set the "changed" bit on the first
+ * instruction, process it (setting additional "changed" bits), and repeat until there are no
+ * more.
+ *
+ * v3 4.11.1.1
+ * - (N/A) operand stack is always the same size
+ * - operand stack [registers] contain the correct types of values
+ * - local variables [registers] contain the correct types of values
+ * - methods are invoked with the appropriate arguments
+ * - fields are assigned using values of appropriate types
+ * - opcodes have the correct type values in operand registers
+ * - there is never an uninitialized class instance in a local variable in code protected by an
+ * exception handler (operand stack is okay, because the operand stack is discarded when an
+ * exception is thrown) [can't know what's a local var w/o the debug info -- should fall out of
+ * register typing]
+ *
+ * v3 4.11.1.2
+ * - execution cannot fall off the end of the code
+ *
+ * (We also do many of the items described in the "static checks" sections, because it's easier to
+ * do them here.)
+ *
+ * We need an array of RegType values, one per register, for every instruction. If the method uses
+ * monitor-enter, we need extra data for every register, and a stack for every "interesting"
+ * instruction. In theory this could become quite large -- up to several megabytes for a monster
+ * function.
+ *
+ * NOTE:
+ * The spec forbids backward branches when there's an uninitialized reference in a register. The
+ * idea is to prevent something like this:
+ * loop:
+ * move r1, r0
+ * new-instance r0, MyClass
+ * ...
+ * if-eq rN, loop // once
+ * initialize r0
+ *
+ * This leaves us with two different instances, both allocated by the same instruction, but only
+ * one is initialized. The scheme outlined in v3 4.11.1.4 wouldn't catch this, so they work around
+ * it by preventing backward branches. We achieve identical results without restricting code
+ * reordering by specifying that you can't execute the new-instance instruction if a register
+ * contains an uninitialized instance created by that same instruction.
+ */
+ bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Perform verification for a single instruction.
+ *
+ * This requires fully decoding the instruction to determine the effect it has on registers.
+ *
+ * Finds zero or more following instructions and sets the "changed" flag if execution at that
+ * point needs to be (re-)evaluated. Register changes are merged into "reg_types_" at the target
+ * addresses. Does not set or clear any other flags in "insn_flags_".
+ */
+ bool CodeFlowVerifyInstruction(uint32_t* start_guess)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of a new array instruction
+ void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Perform verification of an aget instruction. The destination register's type will be set to
+  // the component type of the array unless the array type is unknown, in which case a bottom
+  // type inferred from the type of the instruction is used. is_primitive is false for an
+  // aget-object.
+ void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of an aput instruction.
+ void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Lookup instance field and fail for resolution violations
+ mirror::Field* GetInstanceField(const RegType& obj_type, int field_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Lookup static field and fail for resolution violations
+ mirror::Field* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of an iget or sget instruction.
+ void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of an iput or sput instruction.
+ void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive, bool is_static)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Returns the field accessed by a quick field access instruction (iget/iput-quick), or NULL
+  // if it cannot be found.
+ mirror::Field* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of an iget-quick instruction.
+ void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Perform verification of an iput-quick instruction.
+ void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+ bool is_primitive)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Resolves a class based on an index and performs access checks to ensure the referrer can
+ // access the resolved class.
+ const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
+ * address, determine the Join of all exceptions that can land here. Fails if no matching
+ * exception handler can be found or if the Join of exception types fails.
+ */
+ const RegType& GetCaughtExceptionType()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Resolves a method based on an index and performs access checks to ensure
+ * the referrer can access the resolved method.
+ * Does not throw exceptions.
+ */
+ mirror::AbstractMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Verify the arguments to a method. We're executing in "method", making
+ * a call to the method reference in vB.
+ *
+ * If this is a "direct" invoke, we allow calls to <init>. For calls to
+ * <init>, the first argument may be an uninitialized reference. Otherwise,
+ * calls to anything starting with '<' will be rejected, as will any
+ * uninitialized reference arguments.
+ *
+ * For non-static method calls, this will verify that the method call is
+ * appropriate for the "this" argument.
+ *
+ * The method reference is in vBBBB. The "is_range" parameter determines
+ * whether we use 0-4 "args" values or a range of registers defined by
+ * vAA and vCCCC.
+ *
+ * Widening conversions on integers and references are allowed, but
+ * narrowing conversions are not.
+ *
+ * Returns the resolved method on success, NULL on failure (with *failure
+ * set appropriately).
+ */
+ mirror::AbstractMethod* VerifyInvocationArgs(const Instruction* inst,
+ MethodType method_type,
+ bool is_range, bool is_super)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::AbstractMethod* GetQuickInvokedMethod(const Instruction* inst,
+ RegisterLine* reg_line,
+ bool is_range)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::AbstractMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst,
+ bool is_range)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Verify that the target instruction is not "move-exception". It's important that the only way
+ * to execute a move-exception is as the first instruction of an exception handler.
+ * Returns "true" if all is well, "false" if the target instruction is move-exception.
+ */
+ bool CheckNotMoveException(const uint16_t* insns, int insn_idx);
+
+ /*
+ * Control can transfer to "next_insn". Merge the registers from merge_line into the table at
+ * next_insn, and set the changed flag on the target address if any of the registers were changed.
+ * Returns "false" if an error is encountered.
+ */
+ bool UpdateRegisters(uint32_t next_insn, const RegisterLine* merge_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Is the method being verified a constructor?
+ bool IsConstructor() const {
+ return (method_access_flags_ & kAccConstructor) != 0;
+ }
+
+ // Is the method verified static?
+ bool IsStatic() const {
+ return (method_access_flags_ & kAccStatic) != 0;
+ }
+
+ // Return the register type for the method.
+ const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get a type representing the declaring class of the method.
+ const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
+ * verification). For type-precise determination we have all the data we need, so we just need to
+ * encode it in some clever fashion.
+ * Returns a pointer to a newly-allocated RegisterMap, or NULL on failure.
+ */
+ const std::vector<uint8_t>* GenerateGcMap();
+
+ // Verify that the GC map associated with method_ is well formed
+ void VerifyGcMap(const std::vector<uint8_t>& data);
+
+ // Compute sizes for GC map data
+ void ComputeGcMapSizes(size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc);
+
+ InstructionFlags* CurrentInsnFlags();
+
+ // All the GC maps that the verifier has created
+ typedef SafeMap<const MethodReference, const std::vector<uint8_t>*,
+ MethodReferenceComparator> DexGcMapTable;
+ static ReaderWriterMutex* dex_gc_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ static DexGcMapTable* dex_gc_maps_ GUARDED_BY(dex_gc_maps_lock_);
+ static void SetDexGcMap(MethodReference ref, const std::vector<uint8_t>& dex_gc_map)
+ LOCKS_EXCLUDED(dex_gc_maps_lock_);
+
+ // Cast elision types.
+ typedef std::set<uint32_t> MethodSafeCastSet;
+ typedef SafeMap<const MethodReference, const MethodSafeCastSet*,
+ MethodReferenceComparator> SafeCastMap;
+ MethodVerifier::MethodSafeCastSet* GenerateSafeCastSet()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* mscs)
+      LOCKS_EXCLUDED(safecast_map_lock_);
+ static Mutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ static SafeCastMap* safecast_map_ GUARDED_BY(safecast_map_lock_);
+
+ // Devirtualization map.
+ typedef SafeMap<const uint32_t, MethodReference> PcToConcreteMethodMap;
+ typedef SafeMap<const MethodReference, const PcToConcreteMethodMap*,
+ MethodReferenceComparator> DevirtualizationMapTable;
+ MethodVerifier::PcToConcreteMethodMap* GenerateDevirtMap()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static ReaderWriterMutex* devirt_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ static DevirtualizationMapTable* devirt_maps_ GUARDED_BY(devirt_maps_lock_);
+ static void SetDevirtMap(MethodReference ref,
+ const PcToConcreteMethodMap* pc_method_map)
+ LOCKS_EXCLUDED(devirt_maps_lock_);
+
+  // Rejected classes.
+  typedef std::set<ClassReference> RejectedClassesTable;
+  static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_);
+
+ static void AddRejectedClass(ClassReference ref)
+ LOCKS_EXCLUDED(rejected_classes_lock_);
+
+ RegTypeCache reg_types_;
+
+ PcToRegisterLineTable reg_table_;
+
+ // Storage for the register status we're currently working on.
+ UniquePtr<RegisterLine> work_line_;
+
+  // The address of the instruction we're currently working on; note that this is measured in
+  // 2-byte code units.
+ uint32_t work_insn_idx_;
+
+ // Storage for the register status we're saving for later.
+ UniquePtr<RegisterLine> saved_line_;
+
+ const uint32_t dex_method_idx_; // The method we're working on.
+ // Its object representation if known.
+ mirror::AbstractMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
+ const uint32_t method_access_flags_; // Method's access flags.
+ const DexFile* const dex_file_; // The dex file containing the method.
+ // The dex_cache for the declaring class of the method.
+ mirror::DexCache* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+ // The class loader for the declaring class of the method.
+ mirror::ClassLoader* class_loader_ GUARDED_BY(Locks::mutator_lock_);
+ const uint32_t class_def_idx_; // The class def index of the declaring class of the method.
+ const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
+ const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
+ // Instruction widths and flags, one entry per code unit.
+ UniquePtr<InstructionFlags[]> insn_flags_;
+ // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
+ uint32_t interesting_dex_pc_;
+ // The container into which FindLocksAtDexPc should write the registers containing held locks,
+ // NULL if we're not doing FindLocksAtDexPc.
+ std::vector<uint32_t>* monitor_enter_dex_pcs_;
+
+  // The types of any errors that occur.
+ std::vector<VerifyError> failures_;
+ // Error messages associated with failures.
+ std::vector<std::ostringstream*> failure_messages_;
+ // Is there a pending hard failure?
+ bool have_pending_hard_failure_;
+  // Is there a pending runtime throw failure? A runtime throw failure arises when an instruction
+  // would unavoidably throw an exception at runtime, making the following code unreachable. This
+  // is set by Fail and used to ensure we don't process unreachable instructions that would hard
+  // fail the verification.
+ bool have_pending_runtime_throw_failure_;
+
+  // Info message log used primarily for verifier diagnostics.
+ std::ostringstream info_messages_;
+
+ // The number of occurrences of specific opcodes.
+ size_t new_instance_count_;
+ size_t monitor_enter_count_;
+
+ const bool can_load_classes_;
+
+ // Converts soft failures to hard failures when false. Only false when the compiler isn't
+ // running and the verifier is called from the class linker.
+ const bool allow_soft_failures_;
+};
+std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
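+
+// A minimal sketch of driving the verifier through its public entry point, mirroring the usage
+// in method_verifier_test.cc (klass and the mutator lock are assumed to be supplied by the
+// caller):
+//
+//   std::string error;
+//   MethodVerifier::FailureKind result =
+//       MethodVerifier::VerifyClass(klass, error, true /* allow_soft_failures */);
+//   if (result != MethodVerifier::kNoFailure) {
+//     LOG(WARNING) << "Verification failed: " << error;
+//   }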
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_METHOD_VERIFIER_H_
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
new file mode 100644
index 0000000..611b7c0
--- /dev/null
+++ b/runtime/verifier/method_verifier_test.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "UniquePtr.h"
+#include "class_linker.h"
+#include "common_test.h"
+#include "dex_file.h"
+#include "method_verifier.h"
+
+namespace art {
+namespace verifier {
+
+class MethodVerifierTest : public CommonTest {
+ protected:
+ void VerifyClass(const std::string& descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    ASSERT_FALSE(descriptor.empty());
+ mirror::Class* klass = class_linker_->FindSystemClass(descriptor.c_str());
+
+ // Verify the class
+ std::string error_msg;
+    ASSERT_TRUE(MethodVerifier::VerifyClass(klass, error_msg, true) == MethodVerifier::kNoFailure)
+        << error_msg;
+ }
+
+ void VerifyDexFile(const DexFile* dex)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ASSERT_TRUE(dex != NULL);
+
+ // Verify all the classes defined in this file
+ for (size_t i = 0; i < dex->NumClassDefs(); i++) {
+ const DexFile::ClassDef& class_def = dex->GetClassDef(i);
+ const char* descriptor = dex->GetClassDescriptor(class_def);
+ VerifyClass(descriptor);
+ }
+ }
+};
+
+TEST_F(MethodVerifierTest, LibCore) {
+ ScopedObjectAccess soa(Thread::Current());
+ VerifyDexFile(java_lang_dex_file_);
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
new file mode 100644
index 0000000..1c61a29
--- /dev/null
+++ b/runtime/verifier/reg_type.cc
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reg_type.h"
+
+#include "base/casts.h"
+#include "dex_file-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "reg_type_cache-inl.h"
+#include "scoped_thread_state_change.h"
+
+#include <limits>
+#include <sstream>
+
+namespace art {
+namespace verifier {
+
+UndefinedType* UndefinedType::instance_ = NULL;
+ConflictType* ConflictType::instance_ = NULL;
+BooleanType* BooleanType::instance = NULL;
+ByteType* ByteType::instance_ = NULL;
+ShortType* ShortType::instance_ = NULL;
+CharType* CharType::instance_ = NULL;
+FloatType* FloatType::instance_ = NULL;
+LongLoType* LongLoType::instance_ = NULL;
+LongHiType* LongHiType::instance_ = NULL;
+DoubleLoType* DoubleLoType::instance_ = NULL;
+DoubleHiType* DoubleHiType::instance_ = NULL;
+IntegerType* IntegerType::instance_ = NULL;
+
+int32_t RegType::ConstantValue() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValue: " << *this;
+ return 0;
+}
+
+int32_t RegType::ConstantValueLo() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValueLo: " << *this;
+ return 0;
+}
+
+int32_t RegType::ConstantValueHi() const {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Unexpected call to ConstantValueHi: " << *this;
+ return 0;
+}
+
+PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ CHECK(klass != NULL);
+ CHECK(!descriptor.empty());
+}
+
+Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : PrimitiveType(klass, descriptor, cache_id) {
+}
+
+Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : PrimitiveType(klass, descriptor, cache_id) {
+}
+
+std::string PreciseConstType::Dump() const {
+ std::stringstream result;
+  int32_t val = ConstantValue();
+ if (val == 0) {
+ CHECK(IsPreciseConstant());
+ result << "Zero/null";
+ } else {
+ result << "Precise ";
+ if (IsConstantShort()) {
+ result << StringPrintf("Constant: %d", val);
+ } else {
+ result << StringPrintf("Constant: 0x%x", val);
+ }
+ }
+ return result.str();
+}
+
+std::string BooleanType::Dump() const {
+ return "boolean";
+}
+
+std::string ConflictType::Dump() const {
+ return "Conflict";
+}
+
+std::string ByteType::Dump() const {
+ return "Byte";
+}
+
+std::string ShortType::Dump() const {
+ return "short";
+}
+
+std::string CharType::Dump() const {
+ return "Char";
+}
+
+std::string FloatType::Dump() const {
+ return "float";
+}
+
+std::string LongLoType::Dump() const {
+ return "long (Low Half)";
+}
+
+std::string LongHiType::Dump() const {
+ return "long (High Half)";
+}
+
+std::string DoubleLoType::Dump() const {
+ return "Double (Low Half)";
+}
+
+std::string DoubleHiType::Dump() const {
+ return "Double (High Half)";
+}
+
+std::string IntegerType::Dump() const {
+ return "Integer";
+}
+
+DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new DoubleHiType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+DoubleHiType* DoubleHiType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void DoubleHiType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new DoubleLoType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+DoubleLoType* DoubleLoType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void DoubleLoType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new LongLoType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new LongHiType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+LongHiType* LongHiType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void LongHiType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+LongLoType* LongLoType::GetInstance() {
+  CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void LongLoType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new FloatType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+FloatType* FloatType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void FloatType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new CharType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+CharType* CharType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void CharType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new ShortType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+ShortType* ShortType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void ShortType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new ByteType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+ByteType* ByteType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void ByteType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new IntegerType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+IntegerType* IntegerType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void IntegerType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+ConflictType* ConflictType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new ConflictType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+ConflictType* ConflictType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void ConflictType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (BooleanType::instance == NULL) {
+ instance = new BooleanType(klass, descriptor, cache_id);
+ }
+ return BooleanType::instance;
+}
+
+BooleanType* BooleanType::GetInstance() {
+ CHECK(BooleanType::instance != NULL);
+ return BooleanType::instance;
+}
+
+void BooleanType::Destroy() {
+  if (BooleanType::instance != NULL) {
+ delete instance;
+ instance = NULL;
+ }
+}
+
+std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return "Undefined";
+}
+
+UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id) {
+ if (instance_ == NULL) {
+ instance_ = new UndefinedType(klass, descriptor, cache_id);
+ }
+ return instance_;
+}
+
+UndefinedType* UndefinedType::GetInstance() {
+ CHECK(instance_ != NULL);
+ return instance_;
+}
+
+void UndefinedType::Destroy() {
+ if (instance_ != NULL) {
+ delete instance_;
+ instance_ = NULL;
+ }
+}
+
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ : RegType(klass, descriptor, cache_id) {
+ DCHECK(klass->IsInstantiable());
+}
+
+std::string UnresolvedMergedType::Dump() const {
+ std::stringstream result;
+ std::set<uint16_t> types = GetMergedTypes();
+ result << "UnresolvedMergedReferences(";
+ typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
+ It it = types.begin();
+ result << reg_type_cache_->GetFromId(*it).Dump();
+ for (++it; it != types.end(); ++it) {
+ result << ", ";
+ result << reg_type_cache_->GetFromId(*it).Dump();
+ }
+ result << ")";
+ return result.str();
+}
+
+std::string UnresolvedSuperClass::Dump() const {
+ std::stringstream result;
+ uint16_t super_type_id = GetUnresolvedSuperClassChildId();
+ result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
+ return result.str();
+}
+
+std::string UnresolvedReferenceType::Dump() const {
+ std::stringstream result;
+ result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
+ return result.str();
+}
+
+std::string UnresolvedUninitializedRefType::Dump() const {
+ std::stringstream result;
+ result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
+ result << " Allocation PC: " << GetAllocationPc();
+ return result.str();
+}
+
+std::string UnresolvedUninitializedThisRefType::Dump() const {
+ std::stringstream result;
+ result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
+ return result.str();
+}
+
+std::string ReferenceType::Dump() const {
+ std::stringstream result;
+ result << "Reference" << ": " << PrettyDescriptor(GetClass());
+ return result.str();
+}
+
+std::string PreciseReferenceType::Dump() const {
+ std::stringstream result;
+ result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
+ return result.str();
+}
+
+std::string UninitializedReferenceType::Dump() const {
+ std::stringstream result;
+ result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
+ result << " Allocation PC: " << GetAllocationPc();
+ return result.str();
+}
+
+std::string UninitializedThisReferenceType::Dump() const {
+ std::stringstream result;
+ result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
+ result << "Allocation PC: " << GetAllocationPc();
+ return result.str();
+}
+
+std::string ImpreciseConstType::Dump() const {
+ std::stringstream result;
+  int32_t val = ConstantValue();
+  if (val == 0) {
+    result << "Zero/null";
+ } else {
+ result << "Imprecise ";
+ if (IsConstantShort()) {
+ result << StringPrintf("Constant: %d", val);
+ } else {
+ result << StringPrintf("Constant: 0x%x", val);
+ }
+ }
+ return result.str();
+}
+
+std::string PreciseConstLoType::Dump() const {
+ std::stringstream result;
+
+ int32_t val = ConstantValueLo();
+ result << "Precise ";
+ if (val >= std::numeric_limits<jshort>::min() &&
+ val <= std::numeric_limits<jshort>::max()) {
+ result << StringPrintf("Low-half Constant: %d", val);
+ } else {
+ result << StringPrintf("Low-half Constant: 0x%x", val);
+ }
+ return result.str();
+}
+
+std::string ImpreciseConstLoType::Dump() const {
+ std::stringstream result;
+
+ int32_t val = ConstantValueLo();
+ result << "Imprecise ";
+ if (val >= std::numeric_limits<jshort>::min() &&
+ val <= std::numeric_limits<jshort>::max()) {
+ result << StringPrintf("Low-half Constant: %d", val);
+ } else {
+ result << StringPrintf("Low-half Constant: 0x%x", val);
+ }
+ return result.str();
+}
+
+std::string PreciseConstHiType::Dump() const {
+ std::stringstream result;
+ int32_t val = ConstantValueHi();
+ result << "Precise ";
+ if (val >= std::numeric_limits<jshort>::min() &&
+ val <= std::numeric_limits<jshort>::max()) {
+ result << StringPrintf("High-half Constant: %d", val);
+ } else {
+ result << StringPrintf("High-half Constant: 0x%x", val);
+ }
+ return result.str();
+}
+
+std::string ImpreciseConstHiType::Dump() const {
+ std::stringstream result;
+ int32_t val = ConstantValueHi();
+ result << "Imprecise ";
+ if (val >= std::numeric_limits<jshort>::min() &&
+ val <= std::numeric_limits<jshort>::max()) {
+ result << StringPrintf("High-half Constant: %d", val);
+ } else {
+ result << StringPrintf("High-half Constant: 0x%x", val);
+ }
+ return result.str();
+}
+
+ConstantType::ConstantType(uint32_t constant, uint16_t cache_id)
+ : RegType(NULL, "", cache_id), constant_(constant) {
+}
+
+const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (incoming_type.IsUndefined()) {
+ return *this; // Undefined MERGE Undefined => Undefined
+ }
+ return reg_types->Conflict();
+}
+
+const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+ DCHECK(IsLowHalf());
+ if (IsLongLo()) {
+ return cache->LongHi();
+ } else if (IsDoubleLo()) {
+ return cache->DoubleHi();
+ } else {
+ DCHECK(IsImpreciseConstantLo());
+ return cache->FromCat2ConstHi(ConstantValue(), false);
+ }
+}
+
+Primitive::Type RegType::GetPrimitiveType() const {
+ if (IsNonZeroReferenceTypes()) {
+ return Primitive::kPrimNot;
+ } else if (IsBooleanTypes()) {
+ return Primitive::kPrimBoolean;
+ } else if (IsByteTypes()) {
+ return Primitive::kPrimByte;
+ } else if (IsShortTypes()) {
+ return Primitive::kPrimShort;
+ } else if (IsCharTypes()) {
+ return Primitive::kPrimChar;
+ } else if (IsFloat()) {
+ return Primitive::kPrimFloat;
+ } else if (IsIntegralTypes()) {
+ return Primitive::kPrimInt;
+ } else if (IsDoubleLo()) {
+ return Primitive::kPrimDouble;
+ } else {
+ DCHECK(IsLongTypes());
+ return Primitive::kPrimLong;
+ }
+}
+
+bool UninitializedType::IsUninitializedTypes() const {
+ return true;
+}
+
+bool UninitializedType::IsNonZeroReferenceTypes() const {
+ return true;
+}
+
+bool UnresolvedType::IsNonZeroReferenceTypes() const {
+ return true;
+}
+
+std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
+  std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
+  const RegType& left_type = reg_type_cache_->GetFromId(refs.first);
+  UnresolvedMergedType* left =
+      down_cast<UnresolvedMergedType*>(const_cast<RegType*>(&left_type));
+  const RegType& right_type = reg_type_cache_->GetFromId(refs.second);
+  UnresolvedMergedType* right =
+      down_cast<UnresolvedMergedType*>(const_cast<RegType*>(&right_type));
+
+ std::set<uint16_t> types;
+ if (left->IsUnresolvedMergedReference()) {
+ types = left->GetMergedTypes();
+ } else {
+ types.insert(refs.first);
+ }
+ if (right->IsUnresolvedMergedReference()) {
+ std::set<uint16_t> right_types = right->GetMergedTypes();
+ types.insert(right_types.begin(), right_types.end());
+ } else {
+ types.insert(refs.second);
+ }
+ if (kIsDebugBuild) {
+ typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
+ for (It it = types.begin(); it != types.end(); ++it) {
+ CHECK(!reg_type_cache_->GetFromId(*it).IsUnresolvedMergedReference());
+ }
+ }
+ return types;
+}
+
+const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+ if (!IsUnresolvedTypes()) {
+ mirror::Class* super_klass = GetClass()->GetSuperClass();
+ if (super_klass != NULL) {
+ // A super class of a precise type isn't precise as a precise type indicates the register
+ // holds exactly that type.
+ return cache->FromClass(ClassHelper(super_klass).GetDescriptor(), super_klass, false);
+ } else {
+ return cache->Zero();
+ }
+ } else {
+ if (!IsUnresolvedMergedReference() && !IsUnresolvedSuperClass() &&
+ GetDescriptor()[0] == '[') {
+ // Super class of all arrays is Object.
+ return cache->JavaLangObject(true);
+ } else {
+ return cache->FromUnresolvedSuperClass(*this);
+ }
+ }
+}
+
+bool RegType::CanAccess(const RegType& other) const {
+ if (Equals(other)) {
+ return true; // Trivial accessibility.
+ } else {
+ bool this_unresolved = IsUnresolvedTypes();
+ bool other_unresolved = other.IsUnresolvedTypes();
+ if (!this_unresolved && !other_unresolved) {
+ return GetClass()->CanAccess(other.GetClass());
+ } else if (!other_unresolved) {
+ return other.GetClass()->IsPublic(); // Be conservative, only allow if other is public.
+ } else {
+ return false; // More complicated test not possible on unresolved types, be conservative.
+ }
+ }
+}
+
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+ if ((access_flags & kAccPublic) != 0) {
+ return true;
+ }
+ if (!IsUnresolvedTypes()) {
+ return GetClass()->CanAccessMember(klass, access_flags);
+ } else {
+ return false; // More complicated test not possible on unresolved types, be conservative.
+ }
+}
+
+bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
+ // Primitive arrays will always resolve
+ DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
+ return descriptor_[0] == '[';
+ } else if (HasClass()) {
+ mirror::Class* type = GetClass();
+ return type->IsArrayClass() && !type->GetComponentType()->IsPrimitive();
+ } else {
+ return false;
+ }
+}
+
+bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return IsReference() && GetClass()->IsObjectClass();
+}
+
+bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
+ return descriptor_[0] == '[';
+ } else if (HasClass()) {
+ return GetClass()->IsArrayClass();
+ } else {
+ return false;
+ }
+}
+
+bool RegType::IsJavaLangObjectArray() const {
+ if (HasClass()) {
+ mirror::Class* type = GetClass();
+ return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
+ }
+ return false;
+}
+
+bool RegType::IsInstantiableTypes() const {
+ return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
+}
+
+ImpreciseConstType::ImpreciseConstType(uint32_t constant, uint16_t cache_id)
+    : ConstantType(constant, cache_id) {
+}
+
+static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (lhs.Equals(rhs)) {
+ return true;
+ } else {
+ if (lhs.IsBoolean()) {
+ return rhs.IsBooleanTypes();
+ } else if (lhs.IsByte()) {
+ return rhs.IsByteTypes();
+ } else if (lhs.IsShort()) {
+ return rhs.IsShortTypes();
+ } else if (lhs.IsChar()) {
+ return rhs.IsCharTypes();
+ } else if (lhs.IsInteger()) {
+ return rhs.IsIntegralTypes();
+ } else if (lhs.IsFloat()) {
+ return rhs.IsFloatTypes();
+ } else if (lhs.IsLongLo()) {
+ return rhs.IsLongTypes();
+ } else if (lhs.IsDoubleLo()) {
+ return rhs.IsDoubleTypes();
+ } else {
+ CHECK(lhs.IsReferenceTypes())
+ << "Unexpected register type in IsAssignableFrom: '"
+ << lhs << "' := '" << rhs << "'";
+ if (rhs.IsZero()) {
+ return true; // All reference types can be assigned null.
+ } else if (!rhs.IsReferenceTypes()) {
+ return false; // Expect rhs to be a reference type.
+ } else if (lhs.IsJavaLangObject()) {
+ return true; // All reference types can be assigned to Object.
+ } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
+ // If we're not strict allow assignment to any interface, see comment in ClassJoin.
+ return true;
+ } else if (lhs.IsJavaLangObjectArray()) {
+ return rhs.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[]
+ } else if (lhs.HasClass() && rhs.HasClass() &&
+ lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
+ // We're assignable from the Class point-of-view.
+ return true;
+ } else {
+ // Unresolved types are only assignable for null and equality.
+ return false;
+ }
+ }
+ }
+}
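+
+// A few illustrative outcomes of the rules above (a sketch, not exhaustive):
+//   boolean := constant 0/1   -> assignable (IsBooleanTypes() covers boolean-valued constants)
+//   int     := char           -> assignable (widening within the integral types)
+//   Object  := any reference  -> assignable
+//   String  := Zero (null)    -> assignable (null assigns to every reference type)
+//   short   := int            -> not assignable (narrowing conversions are rejected)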
+
+bool RegType::IsAssignableFrom(const RegType& src) const {
+ return AssignableFrom(*this, src, false);
+}
+
+bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+ return AssignableFrom(*this, src, true);
+}
+
+int32_t ConstantType::ConstantValue() const {
+ DCHECK(IsConstantTypes());
+ return constant_;
+}
+
+int32_t ConstantType::ConstantValueLo() const {
+ DCHECK(IsConstantLo());
+ return constant_;
+}
+
+int32_t ConstantType::ConstantValueHi() const {
+ if (IsConstantHi() || IsPreciseConstantHi() || IsImpreciseConstantHi()) {
+ return constant_;
+ } else {
+ DCHECK(false);
+ return 0;
+ }
+}
+
+static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+ return a.IsConstant() ? b : a;
+}
+
+const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+ DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
+ if (IsConflict()) {
+ return *this; // Conflict MERGE * => Conflict
+ } else if (incoming_type.IsConflict()) {
+ return incoming_type; // * MERGE Conflict => Conflict
+ } else if (IsUndefined() || incoming_type.IsUndefined()) {
+ return reg_types->Conflict(); // Unknown MERGE * => Conflict
+ } else if (IsConstant() && incoming_type.IsConstant()) {
+ int32_t val1 = ConstantValue();
+ int32_t val2 = incoming_type.ConstantValue();
+ if (val1 >= 0 && val2 >= 0) {
+ // +ve1 MERGE +ve2 => MAX(+ve1, +ve2)
+ if (val1 >= val2) {
+ if (!IsPreciseConstant()) {
+ return *this;
+ } else {
+ return reg_types->FromCat1Const(val1, false);
+ }
+ } else {
+ if (!incoming_type.IsPreciseConstant()) {
+ return incoming_type;
+ } else {
+ return reg_types->FromCat1Const(val2, false);
+ }
+ }
+ } else if (val1 < 0 && val2 < 0) {
+ // -ve1 MERGE -ve2 => MIN(-ve1, -ve2)
+ if (val1 <= val2) {
+ if (!IsPreciseConstant()) {
+ return *this;
+ } else {
+ return reg_types->FromCat1Const(val1, false);
+ }
+ } else {
+ if (!incoming_type.IsPreciseConstant()) {
+ return incoming_type;
+ } else {
+ return reg_types->FromCat1Const(val2, false);
+ }
+ }
+ } else {
+ // Values are +ve and -ve, choose smallest signed type in which they both fit
+ if (IsConstantByte()) {
+ if (incoming_type.IsConstantByte()) {
+ return reg_types->ByteConstant();
+ } else if (incoming_type.IsConstantShort()) {
+ return reg_types->ShortConstant();
+ } else {
+ return reg_types->IntConstant();
+ }
+ } else if (IsConstantShort()) {
+ if (incoming_type.IsConstantShort()) {
+ return reg_types->ShortConstant();
+ } else {
+ return reg_types->IntConstant();
+ }
+ } else {
+ return reg_types->IntConstant();
+ }
+ }
+ } else if (IsConstantLo() && incoming_type.IsConstantLo()) {
+ int32_t val1 = ConstantValueLo();
+ int32_t val2 = incoming_type.ConstantValueLo();
+ return reg_types->FromCat2ConstLo(val1 | val2, false);
+ } else if (IsConstantHi() && incoming_type.IsConstantHi()) {
+ int32_t val1 = ConstantValueHi();
+ int32_t val2 = incoming_type.ConstantValueHi();
+ return reg_types->FromCat2ConstHi(val1 | val2, false);
+ } else if (IsIntegralTypes() && incoming_type.IsIntegralTypes()) {
+ if (IsBooleanTypes() && incoming_type.IsBooleanTypes()) {
+ return reg_types->Boolean(); // boolean MERGE boolean => boolean
+ }
+ if (IsByteTypes() && incoming_type.IsByteTypes()) {
+ return reg_types->Byte(); // byte MERGE byte => byte
+ }
+ if (IsShortTypes() && incoming_type.IsShortTypes()) {
+ return reg_types->Short(); // short MERGE short => short
+ }
+ if (IsCharTypes() && incoming_type.IsCharTypes()) {
+ return reg_types->Char(); // char MERGE char => char
+ }
+ return reg_types->Integer(); // int MERGE * => int
+ } else if ((IsFloatTypes() && incoming_type.IsFloatTypes()) ||
+ (IsLongTypes() && incoming_type.IsLongTypes()) ||
+ (IsLongHighTypes() && incoming_type.IsLongHighTypes()) ||
+ (IsDoubleTypes() && incoming_type.IsDoubleTypes()) ||
+ (IsDoubleHighTypes() && incoming_type.IsDoubleHighTypes())) {
+ // Check that the constant case was handled prior to entry.
+ DCHECK(!IsConstant() || !incoming_type.IsConstant());
+ // float/long/double MERGE float/long/double_constant => float/long/double
+ return SelectNonConstant(*this, incoming_type);
+ } else if (IsReferenceTypes() && incoming_type.IsReferenceTypes()) {
+ if (IsZero() || incoming_type.IsZero()) {
+ return SelectNonConstant(*this, incoming_type); // 0 MERGE ref => ref
+ } else if (IsJavaLangObject() || incoming_type.IsJavaLangObject()) {
+ return reg_types->JavaLangObject(false); // Object MERGE ref => Object
+ } else if (IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
+ // We know how to merge an unresolved type with itself, 0 or Object. In this case we
+ // have two sub-classes and don't know how to merge. Create a new string-based unresolved
+ // type that reflects our lack of knowledge and that allows the rest of the unresolved
+ // mechanics to continue.
+ return reg_types->FromUnresolvedMerge(*this, incoming_type);
+ } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
+ // Something that is uninitialized hasn't had its constructor called. Mark any merge
+ // of this type with something that is initialized as conflicting. The cases of a merge
+ // with itself, 0 or Object are handled above.
+ return reg_types->Conflict();
+ } else { // Two reference types, compute Join
+ mirror::Class* c1 = GetClass();
+ mirror::Class* c2 = incoming_type.GetClass();
+ DCHECK(c1 != NULL && !c1->IsPrimitive());
+ DCHECK(c2 != NULL && !c2->IsPrimitive());
+ mirror::Class* join_class = ClassJoin(c1, c2);
+ if (c1 == join_class && !IsPreciseReference()) {
+ return *this;
+ } else if (c2 == join_class && !incoming_type.IsPreciseReference()) {
+ return incoming_type;
+ } else {
+ return reg_types->FromClass(ClassHelper(join_class).GetDescriptor(), join_class, false);
+ }
+ }
+ } else {
+ return reg_types->Conflict(); // Unexpected types => Conflict
+ }
+}
+
+// See comment in reg_type.h
+mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) {
+ DCHECK(!s->IsPrimitive()) << PrettyClass(s);
+ DCHECK(!t->IsPrimitive()) << PrettyClass(t);
+ if (s == t) {
+ return s;
+ } else if (s->IsAssignableFrom(t)) {
+ return s;
+ } else if (t->IsAssignableFrom(s)) {
+ return t;
+ } else if (s->IsArrayClass() && t->IsArrayClass()) {
+ mirror::Class* s_ct = s->GetComponentType();
+ mirror::Class* t_ct = t->GetComponentType();
+ if (s_ct->IsPrimitive() || t_ct->IsPrimitive()) {
+ // Given the types aren't the same, if either array is of primitive types then the only
+ // common parent is java.lang.Object
+ mirror::Class* result = s->GetSuperClass(); // short-cut to java.lang.Object
+ DCHECK(result->IsObjectClass());
+ return result;
+ }
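+ // Both arrays have reference components: the join is an array of the joined
+ // component types, e.g. ClassJoin(String[], Integer[]) gives Object[].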
+ mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::ClassLoader* class_loader = s->GetClassLoader();
+ std::string descriptor("[");
+ descriptor += ClassHelper(common_elem).GetDescriptor();
+ mirror::Class* array_class = class_linker->FindClass(descriptor.c_str(), class_loader);
+ DCHECK(array_class != NULL);
+ return array_class;
+ } else {
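+ // Neither class is assignable from the other, so walk both up to their least
+ // common ancestor; e.g. joining java.util.ArrayList with java.util.LinkedList
+ // gives java.util.AbstractList.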
+ size_t s_depth = s->Depth();
+ size_t t_depth = t->Depth();
+ // Get s and t to the same depth in the hierarchy
+ if (s_depth > t_depth) {
+ while (s_depth > t_depth) {
+ s = s->GetSuperClass();
+ s_depth--;
+ }
+ } else {
+ while (t_depth > s_depth) {
+ t = t->GetSuperClass();
+ t_depth--;
+ }
+ }
+ // Go up the hierarchy until we get to the common parent
+ while (s != t) {
+ s = s->GetSuperClass();
+ t = t->GetSuperClass();
+ }
+ return s;
+ }
+}
+
+void RegType::CheckInvariants() const {
+ if (IsConstant() || IsConstantLo() || IsConstantHi()) {
+ CHECK(descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+ }
+ if (klass_ != NULL) {
+ CHECK(!descriptor_.empty()) << *this;
+ }
+}
+
+void UninitializedThisReferenceType::CheckInvariants() const {
+ CHECK_EQ(GetAllocationPc(), 0U) << *this;
+}
+
+void UnresolvedUninitializedThisRefType::CheckInvariants() const {
+ CHECK_EQ(GetAllocationPc(), 0U) << *this;
+ CHECK(!descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+}
+
+void UnresolvedUninitializedRefType::CheckInvariants() const {
+ CHECK(!descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+}
+
+void UnresolvedMergedType::CheckInvariants() const {
+ // Unresolved merged types: merged types should be defined.
+ CHECK(descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+ CHECK_NE(merged_types_.first, 0U) << *this;
+ CHECK_NE(merged_types_.second, 0U) << *this;
+}
+
+void UnresolvedReferenceType::CheckInvariants() const {
+ CHECK(!descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+}
+
+void UnresolvedSuperClass::CheckInvariants() const {
+ // Unresolved super class: the child it was derived from should be defined.
+ CHECK(descriptor_.empty()) << *this;
+ CHECK(klass_ == NULL) << *this;
+ CHECK_NE(unresolved_child_id_, 0U) << *this;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
+ os << rhs.Dump();
+ return os;
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
new file mode 100644
index 0000000..9ac0eca
--- /dev/null
+++ b/runtime/verifier/reg_type.h
@@ -0,0 +1,925 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REG_TYPE_H_
+#define ART_SRC_VERIFIER_REG_TYPE_H_
+
+#include "base/macros.h"
+#include "globals.h"
+#include "primitive.h"
+
+#include "jni.h"
+
+#include <limits>
+#include <stdint.h>
+#include <set>
+#include <string>
+
+namespace art {
+namespace mirror {
+class Class;
+} // namespace mirror
+namespace verifier {
+
+class RegTypeCache;
+/*
+ * RegType holds information about the "type" of data held in a register.
+ */
+class RegType {
+ public:
+ virtual bool IsUndefined() const { return false; }
+ virtual bool IsConflict() const { return false; }
+ virtual bool IsBoolean() const { return false; }
+ virtual bool IsByte() const { return false; }
+ virtual bool IsChar() const { return false; }
+ virtual bool IsShort() const { return false; }
+ virtual bool IsInteger() const { return false; }
+ virtual bool IsLongLo() const { return false; }
+ virtual bool IsLongHi() const { return false; }
+ virtual bool IsFloat() const { return false; }
+ virtual bool IsDouble() const { return false; }
+ virtual bool IsDoubleLo() const { return false; }
+ virtual bool IsDoubleHi() const { return false; }
+ virtual bool IsUnresolvedReference() const { return false; }
+ virtual bool IsUninitializedReference() const { return false; }
+ virtual bool IsUninitializedThisReference() const { return false; }
+ virtual bool IsUnresolvedAndUninitializedReference() const { return false; }
+ virtual bool IsUnresolvedAndUninitializedThisReference() const { return false; }
+ virtual bool IsUnresolvedMergedReference() const { return false; }
+ virtual bool IsUnresolvedSuperClass() const { return false; }
+ virtual bool IsReference() const { return false; }
+ virtual bool IsPreciseReference() const { return false; }
+ virtual bool IsPreciseConstant() const { return false; }
+ virtual bool IsPreciseConstantLo() const { return false; }
+ virtual bool IsPreciseConstantHi() const { return false; }
+ virtual bool IsImpreciseConstantLo() const { return false; }
+ virtual bool IsImpreciseConstantHi() const { return false; }
+ virtual bool IsImpreciseConstant() const { return false; }
+ virtual bool IsConstantTypes() const { return false; }
+ bool IsConstant() const {
+ return IsPreciseConstant() || IsImpreciseConstant();
+ }
+ bool IsConstantLo() const {
+ return IsPreciseConstantLo() || IsImpreciseConstantLo();
+ }
+ bool IsPrecise() const {
+ return IsPreciseConstantLo() || IsPreciseConstant() || IsPreciseConstantHi();
+ }
+ bool IsLongConstant() const {
+ return IsConstantLo();
+ }
+ bool IsConstantHi() const {
+ return (IsPreciseConstantHi() || IsImpreciseConstantHi());
+ }
+ bool IsLongConstantHigh() const {
+ return IsConstantHi();
+ }
+ virtual bool IsUninitializedTypes() const { return false; }
+ bool IsUnresolvedTypes() const {
+ return IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
+ IsUnresolvedAndUninitializedThisReference() ||
+ IsUnresolvedMergedReference() || IsUnresolvedSuperClass();
+ }
+
+ bool IsLowHalf() const {
+ return (IsLongLo() || IsDoubleLo() || IsPreciseConstantLo() ||
+ IsImpreciseConstantLo());
+ }
+ bool IsHighHalf() const {
+ return (IsLongHi() || IsDoubleHi() || IsPreciseConstantHi() ||
+ IsImpreciseConstantHi());
+ }
+ bool IsLongOrDoubleTypes() const {
+ return IsLowHalf();
+ }
+ // Check this is the low half, and that type_h is its matching high-half.
+ inline bool CheckWidePair(const RegType& type_h) const {
+ if (IsLowHalf()) {
+ return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+ (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
+ (IsImpreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+ (IsImpreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
+ (IsDoubleLo() && type_h.IsDoubleHi()) ||
+ (IsLongLo() && type_h.IsLongHi()));
+ }
+ return false;
+ }
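+ // For example, a long held in (v0, v1) gives v0 a LongLo type and v1 a LongHi
+ // type; CheckWidePair called on v0's type with v1's type then returns true.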
+ // The high half that corresponds to this low half
+ const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsConstantBoolean() const {
+ return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
+ }
+ virtual bool IsConstantChar() const {
+ return false;
+ }
+ virtual bool IsConstantByte() const {
+ return false;
+ }
+ virtual bool IsConstantShort() const {
+ return false;
+ }
+ virtual bool IsOne() const {
+ return false;
+ }
+ virtual bool IsZero() const {
+ return false;
+ }
+ bool IsReferenceTypes() const {
+ return IsNonZeroReferenceTypes() || IsZero();
+ }
+ virtual bool IsNonZeroReferenceTypes() const {
+ return false;
+ }
+ bool IsCategory1Types() const {
+ return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() || IsShort() ||
+ IsBoolean();
+ }
+ bool IsCategory2Types() const {
+ return IsLowHalf(); // Don't expect explicit testing of high halves
+ }
+ bool IsBooleanTypes() const {
+ return IsBoolean() || IsConstantBoolean();
+ }
+ bool IsByteTypes() const {
+ return IsConstantByte() || IsByte() || IsBoolean();
+ }
+ bool IsShortTypes() const {
+ return IsShort() || IsByte() || IsBoolean() || IsConstantShort();
+ }
+ bool IsCharTypes() const {
+ return IsChar() || IsBooleanTypes() || IsConstantChar();
+ }
+ bool IsIntegralTypes() const {
+ return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() || IsBoolean();
+ }
+ // Returns the encoded constant value; this shouldn't be called in the general case.
+ virtual int32_t ConstantValue() const;
+ virtual int32_t ConstantValueLo() const;
+ virtual int32_t ConstantValueHi() const;
+ bool IsArrayIndexTypes() const {
+ return IsIntegralTypes();
+ }
+ // Float type may be derived from any constant type
+ bool IsFloatTypes() const {
+ return IsFloat() || IsConstant();
+ }
+ bool IsLongTypes() const {
+ return IsLongLo() || IsLongConstant();
+ }
+ bool IsLongHighTypes() const {
+ return (IsLongHi() ||
+ IsPreciseConstantHi() ||
+ IsImpreciseConstantHi());
+ }
+ bool IsDoubleTypes() const {
+ return IsDoubleLo() || IsLongConstant();
+ }
+ bool IsDoubleHighTypes() const {
+ return (IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
+ }
+ virtual bool IsLong() const {
+ return false;
+ }
+ virtual bool HasClass() const {
+ return false;
+ }
+ bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Primitive::Type GetPrimitiveType() const;
+ bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() const;
+ const std::string& GetDescriptor() const {
+ DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
+ !IsUnresolvedSuperClass()));
+ return descriptor_;
+ }
+ mirror::Class* GetClass() const {
+ DCHECK(!IsUnresolvedReference());
+ DCHECK(klass_ != NULL);
+ DCHECK(HasClass());
+ return klass_;
+ }
+ uint16_t GetId() const {
+ return cache_id_;
+ }
+ const RegType& GetSuperClass(RegTypeCache* cache) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
+ // Can this type access other?
+ bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can this type access a member with the given properties?
+ bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can this type be assigned by src?
+ // Note: Object and interface types may always be assigned to one another, see comment on
+ // ClassJoin.
+ bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
+ // an interface from an Object.
+ bool IsStrictlyAssignableFrom(const RegType& src) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Are these RegTypes the same?
+ bool Equals(const RegType& other) const {
+ return GetId() == other.GetId();
+ }
+
+ // Compute the merge of this register from one edge (path) with incoming_type from another.
+ virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
+ * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
+ * S and T such that there isn't a parent of both S and T that isn't also the parent of J (i.e. J
+ * is the deepest (lowest upper bound) parent of S and T).
+ *
+ * This operation applies for regular classes and arrays, however, for interface types there
+ * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
+ * order by introducing sets of types, however, the only operation permissible on an interface is
+ * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
+ * types until an invoke-interface call on the interface typed reference at runtime and allow
+ * the perversion of Object being assignable to an interface type (note, however, that we don't
+ * allow assignment of Object or Interface to any concrete class and are therefore type safe).
+ *
+ * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
+ */
+ static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ virtual ~RegType() {}
+
+ protected:
+ RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const std::string descriptor_;
+ mirror::Class* const klass_;
+ const uint16_t cache_id_;
+
+ friend class RegTypeCache;
+
+ DISALLOW_COPY_AND_ASSIGN(RegType);
+};
+
+// Bottom type.
+class ConflictType : public RegType {
+ public:
+ bool IsConflict() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get the singleton Conflict instance.
+ static ConflictType* GetInstance();
+
+ // Create the singleton instance.
+ static ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Destroy the singleton instance.
+ static void Destroy();
+ private:
+ ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ }
+
+ static ConflictType* instance_;
+};
+
+// A variant of the bottom type used to specify an undefined value in the incoming registers.
+// Merging with UndefinedType yields ConflictType which is the true bottom.
+class UndefinedType : public RegType {
+ public:
+ bool IsUndefined() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get the singleton Undefined instance.
+ static UndefinedType* GetInstance();
+
+ // Create the singleton instance.
+ static UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Destroy the singleton instance.
+ static void Destroy();
+ private:
+ UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ }
+
+ virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static UndefinedType* instance_;
+};
+
+class PrimitiveType : public RegType {
+ public:
+ PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class Cat1Type : public PrimitiveType {
+ public:
+ Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class IntegerType : public Cat1Type {
+ public:
+ bool IsInteger() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static IntegerType* GetInstance();
+ static void Destroy();
+ private:
+ IntegerType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+ static IntegerType* instance_;
+};
+
+class BooleanType : public Cat1Type {
+ public:
+ bool IsBoolean() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static BooleanType* GetInstance();
+ static void Destroy();
+ private:
+ BooleanType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+
+ static BooleanType* instance;
+};
+
+class ByteType : public Cat1Type {
+ public:
+ bool IsByte() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ByteType* GetInstance();
+ static void Destroy();
+ private:
+ ByteType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+ static ByteType* instance_;
+};
+
+class ShortType : public Cat1Type {
+ public:
+ bool IsShort() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ShortType* GetInstance();
+ static void Destroy();
+ private:
+ ShortType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+ static ShortType* instance_;
+};
+
+class CharType : public Cat1Type {
+ public:
+ bool IsChar() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static CharType* GetInstance();
+ static void Destroy();
+ private:
+ CharType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+ static CharType* instance_;
+};
+
+class FloatType : public Cat1Type {
+ public:
+ bool IsFloat() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static FloatType* GetInstance();
+ static void Destroy();
+ private:
+ FloatType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat1Type(klass, descriptor, cache_id) {
+ }
+ static FloatType* instance_;
+};
+
+class Cat2Type : public PrimitiveType {
+ public:
+ Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class LongLoType : public Cat2Type {
+ public:
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsLongLo() const {
+ return true;
+ }
+ bool IsLong() const {
+ return true;
+ }
+ static LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static LongLoType* GetInstance();
+ static void Destroy();
+ private:
+ LongLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
+ static LongLoType* instance_;
+};
+
+class LongHiType : public Cat2Type {
+ public:
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsLongHi() const {
+ return true;
+ }
+ static LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static LongHiType* GetInstance();
+ static void Destroy();
+ private:
+ LongHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
+ static LongHiType* instance_;
+};
+
+class DoubleLoType : public Cat2Type {
+ public:
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsDoubleLo() const {
+ return true;
+ }
+ bool IsDouble() const {
+ return true;
+ }
+ static DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static DoubleLoType* GetInstance();
+ static void Destroy();
+ private:
+ DoubleLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
+ static DoubleLoType* instance_;
+};
+
+class DoubleHiType : public Cat2Type {
+ public:
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsDoubleHi() const {
+ return true;
+ }
+ static DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static DoubleHiType* GetInstance();
+ static void Destroy();
+ private:
+ DoubleHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : Cat2Type(klass, descriptor, cache_id) {
+ }
+ static DoubleHiType* instance_;
+};
+
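+// A constant established by a const instruction. For wide (long/double) constants
+// the low and high 32-bit halves are tracked as separate Lo/Hi constant types.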
+class ConstantType : public RegType {
+ public:
+ ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // If this is a 32-bit constant, what is the value? The value may be imprecise, in
+ // which case it represents part of the integer range of values that may be held
+ // in the register.
+ virtual int32_t ConstantValue() const;
+ virtual int32_t ConstantValueLo() const;
+ virtual int32_t ConstantValueHi() const;
+
+ bool IsZero() const {
+ return IsPreciseConstant() && ConstantValue() == 0;
+ }
+ bool IsOne() const {
+ return IsPreciseConstant() && ConstantValue() == 1;
+ }
+
+ bool IsConstantChar() const {
+ return IsConstant() && ConstantValue() >= 0 &&
+ ConstantValue() <= std::numeric_limits<jchar>::max();
+ }
+ bool IsConstantByte() const {
+ return IsConstant() &&
+ ConstantValue() >= std::numeric_limits<jbyte>::min() &&
+ ConstantValue() <= std::numeric_limits<jbyte>::max();
+ }
+ bool IsConstantShort() const {
+ return IsConstant() &&
+ ConstantValue() >= std::numeric_limits<jshort>::min() &&
+ ConstantValue() <= std::numeric_limits<jshort>::max();
+ }
+ virtual bool IsConstantTypes() const { return true; }
+
+ private:
+ const uint32_t constant_;
+};
+
+class PreciseConstType : public ConstantType {
+ public:
+ PreciseConstType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : ConstantType(constant, cache_id) {
+ }
+
+ bool IsPreciseConstant() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class PreciseConstLoType : public ConstantType {
+ public:
+ PreciseConstLoType(uint32_t constant, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : ConstantType(constant, cache_id) {
+ }
+ bool IsPreciseConstantLo() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class PreciseConstHiType : public ConstantType {
+ public:
+ PreciseConstHiType(uint32_t constant, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : ConstantType(constant, cache_id) {
+ }
+ bool IsPreciseConstantHi() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class ImpreciseConstType : public ConstantType {
+ public:
+ ImpreciseConstType(uint32_t constant, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsImpreciseConstant() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class ImpreciseConstLoType : public ConstantType {
+ public:
+ ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constant, cache_id) {
+ }
+ bool IsImpreciseConstantLo() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class ImpreciseConstHiType : public ConstantType {
+ public:
+ ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constant, cache_id) {
+ }
+ bool IsImpreciseConstantHi() const {
+ return true;
+ }
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
+// instructions and must be passed to a constructor.
+class UninitializedType : public RegType {
+ public:
+ UninitializedType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc,
+ uint16_t cache_id)
+ : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {
+ }
+
+ bool IsUninitializedTypes() const;
+ bool IsNonZeroReferenceTypes() const;
+
+ uint32_t GetAllocationPc() const {
+ DCHECK(IsUninitializedTypes());
+ return allocation_pc_;
+ }
+
+ private:
+ const uint32_t allocation_pc_;
+};
+
+// Similar to ReferenceType but not yet having been passed to a constructor.
+class UninitializedReferenceType : public UninitializedType {
+ public:
+ UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint32_t allocation_pc, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
+ }
+
+ bool IsUninitializedReference() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
+class UnresolvedUninitializedRefType : public UninitializedType {
+ public:
+ UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ bool IsUnresolvedAndUninitializedReference() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// Similar to UninitializedReferenceType but special case for the this argument of a constructor.
+class UninitializedThisReferenceType : public UninitializedType {
+ public:
+ UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(klass, descriptor, 0, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ virtual bool IsUninitializedThisReference() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+class UnresolvedUninitializedThisRefType : public UninitializedType {
+ public:
+ UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UninitializedType(NULL, descriptor, 0, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ bool IsUnresolvedAndUninitializedThisReference() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// A type of register holding a reference to an Object of type GetClass or a sub-class.
+class ReferenceType : public RegType {
+ public:
+ ReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ }
+
+ bool IsReference() const {
+ return true;
+ }
+
+ bool IsNonZeroReferenceTypes() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// A type of register holding a reference to an Object of type GetClass and only an object of that
+// type.
+class PreciseReferenceType : public RegType {
+ public:
+ PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool IsPreciseReference() const {
+ return true;
+ }
+
+ bool IsNonZeroReferenceTypes() const {
+ return true;
+ }
+
+ bool HasClass() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// Common parent of unresolved types.
+class UnresolvedType : public RegType {
+ public:
+ UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : RegType(NULL, descriptor, cache_id) {
+ }
+
+ bool IsNonZeroReferenceTypes() const;
+};
+
+// Similar to ReferenceType except the Class couldn't be loaded. Assignability and other tests made
+// of this type must be conservative.
+class UnresolvedReferenceType : public UnresolvedType {
+ public:
+ UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ bool IsUnresolvedReference() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+// Type representing the super-class of an unresolved type.
+class UnresolvedSuperClass : public UnresolvedType {
+ public:
+ UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UnresolvedType("", cache_id), unresolved_child_id_(child_id),
+ reg_type_cache_(reg_type_cache) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ bool IsUnresolvedSuperClass() const {
+ return true;
+ }
+
+ uint16_t GetUnresolvedSuperClassChildId() const {
+ DCHECK(IsUnresolvedSuperClass());
+ return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const uint16_t unresolved_child_id_;
+ const RegTypeCache* const reg_type_cache_;
+};
+
+// A merge of two unresolved types. If the types were resolved this may be Conflict or another
+// known ReferenceType.
+class UnresolvedMergedType : public UnresolvedType {
+ public:
+ UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : UnresolvedType("", cache_id), reg_type_cache_(reg_type_cache) ,merged_types_(left_id, right_id) {
+ if (kIsDebugBuild) {
+ CheckInvariants();
+ }
+ }
+
+ // The top of a tree of merged types.
+ std::pair<uint16_t, uint16_t> GetTopMergedTypes() const {
+ DCHECK(IsUnresolvedMergedReference());
+ return merged_types_;
+ }
+
+ // The complete set of merged types.
+ std::set<uint16_t> GetMergedTypes() const;
+
+ bool IsUnresolvedMergedReference() const {
+ return true;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
+ void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ const RegTypeCache* const reg_type_cache_;
+ const std::pair<uint16_t, uint16_t> merged_types_;
+};
+
+std::ostream& operator<<(std::ostream& os, const RegType& rhs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_REG_TYPE_H_
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
new file mode 100644
index 0000000..42474d1
--- /dev/null
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REG_TYPE_CACHE_INL_H_
+#define ART_SRC_VERIFIER_REG_TYPE_CACHE_INL_H_
+
+#include "reg_type.h"
+#include "reg_type_cache.h"
+#include "class_linker.h"
+
+namespace art {
+namespace verifier {
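+
+// Creates the shared singleton instance for one primitive type, assigning it the
+// next id in the primitive range of the cache.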
+template <class Type>
+Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
+ mirror::Class* klass = NULL;
+ // Try loading the class via the class linker.
+ if (!descriptor.empty()) {
+ klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(descriptor.c_str());
+ }
+ Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
+ RegTypeCache::primitive_count_++;
+ return entry;
+}
+
+inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+ DCHECK_LT(id, entries_.size());
+ RegType* result = entries_[id];
+ DCHECK(result != NULL);
+ return *result;
+}
+} // namespace verifier
+} // namespace art
+#endif // ART_SRC_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
new file mode 100644
index 0000000..6013250
--- /dev/null
+++ b/runtime/verifier/reg_type_cache.cc
@@ -0,0 +1,523 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reg_type_cache-inl.h"
+
+#include "base/casts.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+
+namespace art {
+namespace verifier {
+
+bool RegTypeCache::primitive_initialized_ = false;
+uint16_t RegTypeCache::primitive_start_ = 0;
+uint16_t RegTypeCache::primitive_count_ = 0;
+
+static bool MatchingPrecisionForClass(RegType* entry, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (entry->IsPreciseReference() == precise) {
+ // We were or weren't looking for a precise reference and we found what we need.
+ return true;
+ } else {
+ if (!precise && entry->GetClass()->CannotBeAssignedFromOtherTypes()) {
+ // We weren't looking for a precise reference, but the entry we found is precise
+ // for a class that cannot be assigned from other types, so the precise and
+ // imprecise types describe the same set of values. Treat it as a match.
+ return true;
+ }
+ return false;
+ }
+}
+
+void RegTypeCache::FillPrimitiveTypes() {
+ entries_.push_back(UndefinedType::GetInstance());
+ entries_.push_back(ConflictType::GetInstance());
+ entries_.push_back(BooleanType::GetInstance());
+ entries_.push_back(ByteType::GetInstance());
+ entries_.push_back(ShortType::GetInstance());
+ entries_.push_back(CharType::GetInstance());
+ entries_.push_back(IntegerType::GetInstance());
+ entries_.push_back(LongLoType::GetInstance());
+ entries_.push_back(LongHiType::GetInstance());
+ entries_.push_back(FloatType::GetInstance());
+ entries_.push_back(DoubleLoType::GetInstance());
+ entries_.push_back(DoubleHiType::GetInstance());
+ DCHECK_EQ(entries_.size(), primitive_count_);
+}
+
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
+ DCHECK(RegTypeCache::primitive_initialized_);
+ if (descriptor[1] == '\0') {
+ switch (descriptor[0]) {
+ case 'Z':
+ return Boolean();
+ case 'B':
+ return Byte();
+ case 'S':
+ return Short();
+ case 'C':
+ return Char();
+ case 'I':
+ return Integer();
+ case 'J':
+ return LongLo();
+ case 'F':
+ return Float();
+ case 'D':
+ return DoubleLo();
+ case 'V': // void is not a register type; it falls through to the conflict type.
+ default:
+ return Conflict();
+ }
+ } else if (descriptor[0] == 'L' || descriptor[0] == '[') {
+ return From(loader, descriptor, precise);
+ } else {
+ return Conflict();
+ }
+}
+
+const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+ CHECK(RegTypeCache::primitive_initialized_);
+ switch (prim_type) {
+ case Primitive::kPrimBoolean:
+ return *BooleanType::GetInstance();
+ case Primitive::kPrimByte:
+ return *ByteType::GetInstance();
+ case Primitive::kPrimShort:
+ return *ShortType::GetInstance();
+ case Primitive::kPrimChar:
+ return *CharType::GetInstance();
+ case Primitive::kPrimInt:
+ return *IntegerType::GetInstance();
+ case Primitive::kPrimLong:
+ return *LongLoType::GetInstance();
+ case Primitive::kPrimFloat:
+ return *FloatType::GetInstance();
+ case Primitive::kPrimDouble:
+ return *DoubleLoType::GetInstance();
+ case Primitive::kPrimVoid:
+ default:
+ return *ConflictType::GetInstance();
+ }
+}
+
+bool RegTypeCache::MatchDescriptor(size_t idx, const char* descriptor, bool precise) {
+ RegType* entry = entries_[idx];
+ if (entry->descriptor_ != descriptor) {
+ return false;
+ }
+ if (entry->HasClass()) {
+ return MatchingPrecisionForClass(entry, precise);
+ }
+ // There is no notion of precise unresolved references; the precise information is just dropped
+ // on the floor.
+ DCHECK(entry->IsUnresolvedReference());
+ return true;
+}
+
+mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassLoader* loader) {
+ // Try to resolve the class: load it if we are allowed to, otherwise only look
+ // it up among the classes that are already loaded.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* klass = NULL;
+ if (can_load_classes_) {
+ klass = class_linker->FindClass(descriptor, loader);
+ } else {
+ klass = class_linker->LookupClass(descriptor, loader);
+ if (klass != NULL && !klass->IsLoaded()) {
+ // We found the class but it isn't loaded yet, so it's not safe to use.
+ klass = NULL;
+ }
+ }
+ return klass;
+}
+
+void RegTypeCache::ClearException() {
+ if (can_load_classes_) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ Thread::Current()->ClearException();
+ } else {
+ DCHECK(!Thread::Current()->IsExceptionPending());
+ }
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
+ // Try looking up the class in the cache first.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ if (MatchDescriptor(i, descriptor, precise)) {
+ return *(entries_[i]);
+ }
+ }
+ // Not in the cache; try to resolve the class so a new entry can be created.
+ mirror::Class* klass = ResolveClass(descriptor, loader);
+ if (klass != NULL) {
+ // The class resolved but no cached entry matched, so create a new type.
+ // To pass verification, a type requested as precise must be instantiable;
+ // interfaces and abstract classes may only be requested imprecisely.
+ DCHECK(!precise || klass->IsInstantiable());
+ // Create a precise type if the class is final and not an interface (a precise
+ // interface is meaningless), or if the caller explicitly asked for precision.
+ RegType* entry;
+ if (klass->CannotBeAssignedFromOtherTypes() || precise) {
+ DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
+ DCHECK(!klass->IsInterface());
+ entry = new PreciseReferenceType(klass, descriptor, entries_.size());
+ } else {
+ entry = new ReferenceType(klass, descriptor, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+ } else { // Class not resolved.
+ // We tried loading the class and failed, this might get an exception raised
+ // so we want to clear it before we go on.
+ ClearException();
+ if (IsValidDescriptor(descriptor)) {
+ RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ entries_.push_back(entry);
+ return *entry;
+ } else {
+ // The descriptor is malformed: return the conflict type, as there's nothing
+ // sensible that can be done at runtime.
+ return Conflict();
+ }
+ }
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+ if (klass->IsPrimitive()) {
+ // Note: precise isn't used for primitive classes. A char is assignable to an int. All
+ // primitive classes are final.
+ return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+ } else {
+ // Look for a matching reference among the existing entries.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+ return *cur_entry;
+ }
+ }
+ // No reference to the class was found, create new reference.
+ RegType* entry;
+ if (precise) {
+ entry = new PreciseReferenceType(klass, descriptor, entries_.size());
+ } else {
+ entry = new ReferenceType(klass, descriptor, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+ }
+}
+
+RegTypeCache::~RegTypeCache() {
+ CHECK_LE(primitive_count_, entries_.size());
+ // Delete only the non-primitive types.
+ if (entries_.size() == kNumPrimitives) {
+ // All entries are primitive, nothing to delete.
+ return;
+ }
+ std::vector<RegType*>::iterator non_primitive_begin = entries_.begin();
+ std::advance(non_primitive_begin, kNumPrimitives);
+ STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+}
+
+void RegTypeCache::ShutDown() {
+ if (RegTypeCache::primitive_initialized_) {
+ UndefinedType::Destroy();
+ ConflictType::Destroy();
+ BooleanType::Destroy();
+ ByteType::Destroy();
+ ShortType::Destroy();
+ CharType::Destroy();
+ IntegerType::Destroy();
+ LongLoType::Destroy();
+ LongHiType::Destroy();
+ FloatType::Destroy();
+ DoubleLoType::Destroy();
+ DoubleHiType::Destroy();
+ RegTypeCache::primitive_initialized_ = false;
+ RegTypeCache::primitive_count_ = 0;
+ }
+}
+
+void RegTypeCache::CreatePrimitiveTypes() {
+ CreatePrimitiveTypeInstance<UndefinedType>("");
+ CreatePrimitiveTypeInstance<ConflictType>("");
+ CreatePrimitiveTypeInstance<BooleanType>("Z");
+ CreatePrimitiveTypeInstance<ByteType>("B");
+ CreatePrimitiveTypeInstance<ShortType>("S");
+ CreatePrimitiveTypeInstance<CharType>("C");
+ CreatePrimitiveTypeInstance<IntegerType>("I");
+ CreatePrimitiveTypeInstance<LongLoType>("J");
+ CreatePrimitiveTypeInstance<LongHiType>("J");
+ CreatePrimitiveTypeInstance<FloatType>("F");
+ CreatePrimitiveTypeInstance<DoubleLoType>("D");
+ CreatePrimitiveTypeInstance<DoubleHiType>("D");
+}
+
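+// Flatten both sides into the full set of merged leaf ids so that merges of
+// merges compare equal regardless of the order in which they were built.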
+const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+ std::set<uint16_t> types;
+ if (left.IsUnresolvedMergedReference()) {
+ RegType& non_const(const_cast<RegType&>(left));
+ types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ } else {
+ types.insert(left.GetId());
+ }
+ if (right.IsUnresolvedMergedReference()) {
+ RegType& non_const(const_cast<RegType&>(right));
+ std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ types.insert(right_types.begin(), right_types.end());
+ } else {
+ types.insert(right.GetId());
+ }
+ // Check if entry already exists.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedMergedReference()) {
+ std::set<uint16_t> cur_entry_types =
+ (down_cast<UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
+ if (cur_entry_types == types) {
+ return *cur_entry;
+ }
+ }
+ }
+ // Create entry.
+ RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
+ entries_.push_back(entry);
+ if (kIsDebugBuild) {
+ UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
+ std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
+ CHECK(check_types == types);
+ }
+ return *entry;
+}
+
+const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+ // Check if entry already exists.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedSuperClass()) {
+ UnresolvedSuperClass* tmp_entry =
+ down_cast<UnresolvedSuperClass*>(cur_entry);
+ uint16_t unresolved_super_child_id =
+ tmp_entry->GetUnresolvedSuperClassChildId();
+ if (unresolved_super_child_id == child.GetId()) {
+ return *cur_entry;
+ }
+ }
+ }
+ RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
+ entries_.push_back(entry);
+ return *entry;
+}
+
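+// Returns the uninitialized type for an allocation at allocation_pc, reusing a
+// cached entry that matches both the type and the allocation pc when one exists.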
+const RegType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+ RegType* entry = NULL;
+ RegType* cur_entry = NULL;
+ const std::string& descriptor(type.GetDescriptor());
+ if (type.IsUnresolvedTypes()) {
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedAndUninitializedReference() &&
+ down_cast<UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc() == allocation_pc &&
+ (cur_entry->GetDescriptor() == descriptor)) {
+ return *cur_entry;
+ }
+ }
+ entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+ } else {
+ mirror::Class* klass = type.GetClass();
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ cur_entry = entries_[i];
+ if (cur_entry->IsUninitializedReference() &&
+ down_cast<UninitializedReferenceType*>(cur_entry)
+ ->GetAllocationPc() == allocation_pc &&
+ cur_entry->GetClass() == klass) {
+ return *cur_entry;
+ }
+ }
+ entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
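+// Converts an uninitialized type into its initialized counterpart, i.e. the type
+// a register holds once a constructor has been invoked on the new object.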
+const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+ RegType* entry;
+
+ if (uninit_type.IsUnresolvedTypes()) {
+ const std::string& descriptor(uninit_type.GetDescriptor());
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedReference() &&
+ cur_entry->GetDescriptor() == descriptor) {
+ return *cur_entry;
+ }
+ }
+ entry = new UnresolvedReferenceType(descriptor.c_str(), entries_.size());
+ } else {
+ mirror::Class* klass = uninit_type.GetClass();
+ if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
+ // For uninitialized "this reference" look for reference types that are not precise.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsReference() && cur_entry->GetClass() == klass) {
+ return *cur_entry;
+ }
+ }
+ entry = new ReferenceType(klass, "", entries_.size());
+ } else if (klass->IsInstantiable()) {
+ // We're uninitialized because of an allocation; look for or create a precise
+ // type, as allocations may only create objects of exactly that type.
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) {
+ return *cur_entry;
+ }
+ }
+ entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+ } else {
+ return Conflict();
+ }
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
+const RegType& RegTypeCache::ByteConstant() {
+ return FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+}
+
+const RegType& RegTypeCache::ShortConstant() {
+ return FromCat1Const(std::numeric_limits<jshort>::min(), false);
+}
+
+const RegType& RegTypeCache::IntConstant() {
+ return FromCat1Const(std::numeric_limits<jint>::max(), false);
+}
+
+const RegType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+ RegType* entry;
+ const std::string& descriptor(type.GetDescriptor());
+ if (type.IsUnresolvedTypes()) {
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
+ cur_entry->GetDescriptor() == descriptor) {
+ return *cur_entry;
+ }
+ }
+ entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ } else {
+ mirror::Class* klass = type.GetClass();
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) {
+ return *cur_entry;
+ }
+ }
+ entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
+const RegType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+ cur_entry->IsPreciseConstant() == precise &&
+ (down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
+ return *cur_entry;
+ }
+ }
+ RegType* entry;
+ if (precise) {
+ entry = new PreciseConstType(value, entries_.size());
+ } else {
+ entry = new ImpreciseConstType(value, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
+const RegType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
+ (down_cast<ConstantType*>(cur_entry))->ConstantValueLo() == value) {
+ return *cur_entry;
+ }
+ }
+ RegType* entry;
+ if (precise) {
+ entry = new PreciseConstLoType(value, entries_.size());
+ } else {
+ entry = new ImpreciseConstLoType(value, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
+const RegType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ for (size_t i = primitive_count_; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
+ (down_cast<ConstantType*>(cur_entry))->ConstantValueHi() == value) {
+ return *cur_entry;
+ }
+ }
+ RegType* entry;
+ if (precise) {
+ entry = new PreciseConstHiType(value, entries_.size());
+ } else {
+ entry = new ImpreciseConstHiType(value, entries_.size());
+ }
+ entries_.push_back(entry);
+ return *entry;
+}
+
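+// For unresolved array types the component is derived textually by stripping the
+// leading '[' from the descriptor; for resolved arrays the component class is used.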
+const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+ CHECK(array.IsArrayTypes());
+ if (array.IsUnresolvedTypes()) {
+ const std::string& descriptor(array.GetDescriptor());
+ const std::string component(descriptor.substr(1, descriptor.size() - 1));
+ return FromDescriptor(loader, component.c_str(), false);
+ } else {
+ mirror::Class* klass = array.GetClass()->GetComponentType();
+ return FromClass(ClassHelper(klass).GetDescriptor(), klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ }
+}
+
+void RegTypeCache::Dump(std::ostream& os) {
+ for (size_t i = 0; i < entries_.size(); i++) {
+ RegType* cur_entry = entries_[i];
+ if (cur_entry != NULL) {
+ os << i << ": " << cur_entry->Dump() << "\n";
+ }
+ }
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
new file mode 100644
index 0000000..d70123c
--- /dev/null
+++ b/runtime/verifier/reg_type_cache.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REG_TYPE_CACHE_H_
+#define ART_SRC_VERIFIER_REG_TYPE_CACHE_H_
+
+#include "base/casts.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "reg_type.h"
+#include "runtime.h"
+
+#include <stdint.h>
+#include <vector>
+
+namespace art {
+namespace mirror {
+class Class;
+class ClassLoader;
+} // namespace mirror
+namespace verifier {
+
+class RegType;
+
+const size_t kNumPrimitives = 12;
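+
+// Interns RegType instances so that each distinct type seen during verification
+// has a unique id; the primitive types are shared process-wide singletons.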
+class RegTypeCache {
+ public:
+ explicit RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+ entries_.reserve(64);
+ FillPrimitiveTypes();
+ }
+ ~RegTypeCache();
+ static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!RegTypeCache::primitive_initialized_) {
+ CHECK_EQ(RegTypeCache::primitive_count_, 0);
+ CreatePrimitiveTypes();
+ CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitives);
+ RegTypeCache::primitive_initialized_ = true;
+ }
+ }
+ static void ShutDown();
+ const art::verifier::RegType& GetFromId(uint16_t id) const;
+ const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <class Type>
+ static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FillPrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromCat1Const(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromCat2ConstLo(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromCat2ConstHi(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromUnresolvedSuperClass(const RegType& child)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // String is final and therefore always precise.
+ return From(NULL, "Ljava/lang/String;", true);
+ }
+ const RegType& JavaLangThrowable(bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return From(NULL, "Ljava/lang/Throwable;", precise);
+ }
+ const RegType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FromCat1Const(0, true);
+ }
+ size_t GetCacheSize() {
+ return entries_.size();
+ }
+ const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return *BooleanType::GetInstance();
+ }
+ const RegType& Byte() {
+ return *ByteType::GetInstance();
+ }
+ const RegType& Char() {
+ return *CharType::GetInstance();
+ }
+ const RegType& Short() {
+ return *ShortType::GetInstance();
+ }
+ const RegType& Integer() {
+ return *IntegerType::GetInstance();
+ }
+ const RegType& Float() {
+ return *FloatType::GetInstance();
+ }
+ const RegType& LongLo() {
+ return *LongLoType::GetInstance();
+ }
+ const RegType& LongHi() {
+ return *LongHiType::GetInstance();
+ }
+ const RegType& DoubleLo() {
+ return *DoubleLoType::GetInstance();
+ }
+ const RegType& DoubleHi() {
+ return *DoubleHiType::GetInstance();
+ }
+ const RegType& Undefined() {
+ return *UndefinedType::GetInstance();
+ }
+ const RegType& Conflict() {
+ return *ConflictType::GetInstance();
+ }
+ const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return From(NULL, "Ljava/lang/Class;", precise);
+ }
+ const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return From(NULL, "Ljava/lang/Object;", precise);
+ }
+ const RegType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Create an uninitialized 'this' argument for the given type.
+ const RegType& UninitializedThisArgument(const RegType& type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& FromUninitialized(const RegType& uninit_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+
+ private:
+ std::vector<RegType*> entries_;
+ static bool primitive_initialized_;
+ static uint16_t primitive_start_;
+ static uint16_t primitive_count_;
+ static void CreatePrimitiveTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Whether or not we're allowed to load classes.
+ const bool can_load_classes_;
+ DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
+ mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void ClearException();
+ bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
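+
+// Typical use (illustrative sketch, not part of the original change):
+//   RegTypeCache cache(true /* can_load_classes */);
+//   const RegType& obj = cache.JavaLangObject(true);
+//   const RegType& same = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+//   CHECK(obj.Equals(same));  // Repeated lookups intern to a single entry.
+// Entries carry compact ids, so a register line can store uint16_t ids and
+// recover the full type via GetFromId().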
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_REG_TYPE_CACHE_H_
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
new file mode 100644
index 0000000..f37edff
--- /dev/null
+++ b/runtime/verifier/reg_type_test.cc
@@ -0,0 +1,490 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reg_type.h"
+#include "reg_type_cache-inl.h"
+
+#include "base/casts.h"
+#include "common_test.h"
+#include <set>
+
+namespace art {
+namespace verifier {
+
+class RegTypeTest : public CommonTest {};
+
+TEST_F(RegTypeTest, ConstLoHi) {
+ // Tests creating constant types (category-1 and wide lo/hi halves).
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+ const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+ const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+ const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+ EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
+ EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
+ EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
+
+ const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+ const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+ EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
+
+ const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+ const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+ const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+ EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
+ EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
+}
+
+TEST_F(RegTypeTest, Pairs) {
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ int64_t val = static_cast<int32_t>(1234);
+ const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+ const RegType& long_lo = cache.LongLo();
+ const RegType& long_hi = cache.LongHi();
+ // Check sanity of types.
+ EXPECT_TRUE(precise_lo.IsLowHalf());
+ EXPECT_FALSE(precise_hi.IsLowHalf());
+ EXPECT_FALSE(precise_lo.IsHighHalf());
+ EXPECT_TRUE(precise_hi.IsHighHalf());
+ EXPECT_TRUE(long_hi.IsLongHighTypes());
+ EXPECT_TRUE(precise_hi.IsLongHighTypes());
+ // Check Pairing.
+ EXPECT_FALSE(precise_lo.CheckWidePair(precise_const));
+ EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi));
+ // Test Merging.
+ EXPECT_TRUE((long_lo.Merge(precise_lo, &cache)).IsLongTypes());
+ EXPECT_TRUE((long_hi.Merge(precise_hi, &cache)).IsLongHighTypes());
+}
+
+TEST_F(RegTypeTest, Primitives) {
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+
+ const RegType& bool_reg_type = cache.Boolean();
+ EXPECT_FALSE(bool_reg_type.IsUndefined());
+ EXPECT_FALSE(bool_reg_type.IsConflict());
+ EXPECT_FALSE(bool_reg_type.IsZero());
+ EXPECT_FALSE(bool_reg_type.IsOne());
+ EXPECT_FALSE(bool_reg_type.IsLongConstant());
+ EXPECT_TRUE(bool_reg_type.IsBoolean());
+ EXPECT_FALSE(bool_reg_type.IsByte());
+ EXPECT_FALSE(bool_reg_type.IsChar());
+ EXPECT_FALSE(bool_reg_type.IsShort());
+ EXPECT_FALSE(bool_reg_type.IsInteger());
+ EXPECT_FALSE(bool_reg_type.IsLong());
+ EXPECT_FALSE(bool_reg_type.IsFloat());
+ EXPECT_FALSE(bool_reg_type.IsDouble());
+ EXPECT_FALSE(bool_reg_type.IsReference());
+ EXPECT_FALSE(bool_reg_type.IsLowHalf());
+ EXPECT_FALSE(bool_reg_type.IsHighHalf());
+ EXPECT_FALSE(bool_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(bool_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(bool_reg_type.IsCategory1Types());
+ EXPECT_FALSE(bool_reg_type.IsCategory2Types());
+ EXPECT_TRUE(bool_reg_type.IsBooleanTypes());
+ EXPECT_TRUE(bool_reg_type.IsByteTypes());
+ EXPECT_TRUE(bool_reg_type.IsShortTypes());
+ EXPECT_TRUE(bool_reg_type.IsCharTypes());
+ EXPECT_TRUE(bool_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(bool_reg_type.IsFloatTypes());
+ EXPECT_FALSE(bool_reg_type.IsLongTypes());
+ EXPECT_FALSE(bool_reg_type.IsDoubleTypes());
+ EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& byte_reg_type = cache.Byte();
+ EXPECT_FALSE(byte_reg_type.IsUndefined());
+ EXPECT_FALSE(byte_reg_type.IsConflict());
+ EXPECT_FALSE(byte_reg_type.IsZero());
+ EXPECT_FALSE(byte_reg_type.IsOne());
+ EXPECT_FALSE(byte_reg_type.IsLongConstant());
+ EXPECT_FALSE(byte_reg_type.IsBoolean());
+ EXPECT_TRUE(byte_reg_type.IsByte());
+ EXPECT_FALSE(byte_reg_type.IsChar());
+ EXPECT_FALSE(byte_reg_type.IsShort());
+ EXPECT_FALSE(byte_reg_type.IsInteger());
+ EXPECT_FALSE(byte_reg_type.IsLong());
+ EXPECT_FALSE(byte_reg_type.IsFloat());
+ EXPECT_FALSE(byte_reg_type.IsDouble());
+ EXPECT_FALSE(byte_reg_type.IsReference());
+ EXPECT_FALSE(byte_reg_type.IsLowHalf());
+ EXPECT_FALSE(byte_reg_type.IsHighHalf());
+ EXPECT_FALSE(byte_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(byte_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(byte_reg_type.IsCategory1Types());
+ EXPECT_FALSE(byte_reg_type.IsCategory2Types());
+ EXPECT_FALSE(byte_reg_type.IsBooleanTypes());
+ EXPECT_TRUE(byte_reg_type.IsByteTypes());
+ EXPECT_TRUE(byte_reg_type.IsShortTypes());
+ EXPECT_FALSE(byte_reg_type.IsCharTypes());
+ EXPECT_TRUE(byte_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(byte_reg_type.IsFloatTypes());
+ EXPECT_FALSE(byte_reg_type.IsLongTypes());
+ EXPECT_FALSE(byte_reg_type.IsDoubleTypes());
+ EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& char_reg_type = cache.Char();
+ EXPECT_FALSE(char_reg_type.IsUndefined());
+ EXPECT_FALSE(char_reg_type.IsConflict());
+ EXPECT_FALSE(char_reg_type.IsZero());
+ EXPECT_FALSE(char_reg_type.IsOne());
+ EXPECT_FALSE(char_reg_type.IsLongConstant());
+ EXPECT_FALSE(char_reg_type.IsBoolean());
+ EXPECT_FALSE(char_reg_type.IsByte());
+ EXPECT_TRUE(char_reg_type.IsChar());
+ EXPECT_FALSE(char_reg_type.IsShort());
+ EXPECT_FALSE(char_reg_type.IsInteger());
+ EXPECT_FALSE(char_reg_type.IsLong());
+ EXPECT_FALSE(char_reg_type.IsFloat());
+ EXPECT_FALSE(char_reg_type.IsDouble());
+ EXPECT_FALSE(char_reg_type.IsReference());
+ EXPECT_FALSE(char_reg_type.IsLowHalf());
+ EXPECT_FALSE(char_reg_type.IsHighHalf());
+ EXPECT_FALSE(char_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(char_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(char_reg_type.IsCategory1Types());
+ EXPECT_FALSE(char_reg_type.IsCategory2Types());
+ EXPECT_FALSE(char_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(char_reg_type.IsByteTypes());
+ EXPECT_FALSE(char_reg_type.IsShortTypes());
+ EXPECT_TRUE(char_reg_type.IsCharTypes());
+ EXPECT_TRUE(char_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(char_reg_type.IsFloatTypes());
+ EXPECT_FALSE(char_reg_type.IsLongTypes());
+ EXPECT_FALSE(char_reg_type.IsDoubleTypes());
+ EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& short_reg_type = cache.Short();
+ EXPECT_FALSE(short_reg_type.IsUndefined());
+ EXPECT_FALSE(short_reg_type.IsConflict());
+ EXPECT_FALSE(short_reg_type.IsZero());
+ EXPECT_FALSE(short_reg_type.IsOne());
+ EXPECT_FALSE(short_reg_type.IsLongConstant());
+ EXPECT_FALSE(short_reg_type.IsBoolean());
+ EXPECT_FALSE(short_reg_type.IsByte());
+ EXPECT_FALSE(short_reg_type.IsChar());
+ EXPECT_TRUE(short_reg_type.IsShort());
+ EXPECT_FALSE(short_reg_type.IsInteger());
+ EXPECT_FALSE(short_reg_type.IsLong());
+ EXPECT_FALSE(short_reg_type.IsFloat());
+ EXPECT_FALSE(short_reg_type.IsDouble());
+ EXPECT_FALSE(short_reg_type.IsReference());
+ EXPECT_FALSE(short_reg_type.IsLowHalf());
+ EXPECT_FALSE(short_reg_type.IsHighHalf());
+ EXPECT_FALSE(short_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(short_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(short_reg_type.IsCategory1Types());
+ EXPECT_FALSE(short_reg_type.IsCategory2Types());
+ EXPECT_FALSE(short_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(short_reg_type.IsByteTypes());
+ EXPECT_TRUE(short_reg_type.IsShortTypes());
+ EXPECT_FALSE(short_reg_type.IsCharTypes());
+ EXPECT_TRUE(short_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(short_reg_type.IsFloatTypes());
+ EXPECT_FALSE(short_reg_type.IsLongTypes());
+ EXPECT_FALSE(short_reg_type.IsDoubleTypes());
+ EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& int_reg_type = cache.Integer();
+ EXPECT_FALSE(int_reg_type.IsUndefined());
+ EXPECT_FALSE(int_reg_type.IsConflict());
+ EXPECT_FALSE(int_reg_type.IsZero());
+ EXPECT_FALSE(int_reg_type.IsOne());
+ EXPECT_FALSE(int_reg_type.IsLongConstant());
+ EXPECT_FALSE(int_reg_type.IsBoolean());
+ EXPECT_FALSE(int_reg_type.IsByte());
+ EXPECT_FALSE(int_reg_type.IsChar());
+ EXPECT_FALSE(int_reg_type.IsShort());
+ EXPECT_TRUE(int_reg_type.IsInteger());
+ EXPECT_FALSE(int_reg_type.IsLong());
+ EXPECT_FALSE(int_reg_type.IsFloat());
+ EXPECT_FALSE(int_reg_type.IsDouble());
+ EXPECT_FALSE(int_reg_type.IsReference());
+ EXPECT_FALSE(int_reg_type.IsLowHalf());
+ EXPECT_FALSE(int_reg_type.IsHighHalf());
+ EXPECT_FALSE(int_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(int_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(int_reg_type.IsCategory1Types());
+ EXPECT_FALSE(int_reg_type.IsCategory2Types());
+ EXPECT_FALSE(int_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(int_reg_type.IsByteTypes());
+ EXPECT_FALSE(int_reg_type.IsShortTypes());
+ EXPECT_FALSE(int_reg_type.IsCharTypes());
+ EXPECT_TRUE(int_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(int_reg_type.IsFloatTypes());
+ EXPECT_FALSE(int_reg_type.IsLongTypes());
+ EXPECT_FALSE(int_reg_type.IsDoubleTypes());
+ EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& long_reg_type = cache.LongLo();
+ EXPECT_FALSE(long_reg_type.IsUndefined());
+ EXPECT_FALSE(long_reg_type.IsConflict());
+ EXPECT_FALSE(long_reg_type.IsZero());
+ EXPECT_FALSE(long_reg_type.IsOne());
+ EXPECT_FALSE(long_reg_type.IsLongConstant());
+ EXPECT_FALSE(long_reg_type.IsBoolean());
+ EXPECT_FALSE(long_reg_type.IsByte());
+ EXPECT_FALSE(long_reg_type.IsChar());
+ EXPECT_FALSE(long_reg_type.IsShort());
+ EXPECT_FALSE(long_reg_type.IsInteger());
+ EXPECT_TRUE(long_reg_type.IsLong());
+ EXPECT_FALSE(long_reg_type.IsFloat());
+ EXPECT_FALSE(long_reg_type.IsDouble());
+ EXPECT_FALSE(long_reg_type.IsReference());
+ EXPECT_TRUE(long_reg_type.IsLowHalf());
+ EXPECT_FALSE(long_reg_type.IsHighHalf());
+ EXPECT_TRUE(long_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(long_reg_type.IsReferenceTypes());
+ EXPECT_FALSE(long_reg_type.IsCategory1Types());
+ EXPECT_TRUE(long_reg_type.IsCategory2Types());
+ EXPECT_FALSE(long_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(long_reg_type.IsByteTypes());
+ EXPECT_FALSE(long_reg_type.IsShortTypes());
+ EXPECT_FALSE(long_reg_type.IsCharTypes());
+ EXPECT_FALSE(long_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(long_reg_type.IsFloatTypes());
+ EXPECT_TRUE(long_reg_type.IsLongTypes());
+ EXPECT_FALSE(long_reg_type.IsDoubleTypes());
+ EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& float_reg_type = cache.Float();
+ EXPECT_FALSE(float_reg_type.IsUndefined());
+ EXPECT_FALSE(float_reg_type.IsConflict());
+ EXPECT_FALSE(float_reg_type.IsZero());
+ EXPECT_FALSE(float_reg_type.IsOne());
+ EXPECT_FALSE(float_reg_type.IsLongConstant());
+ EXPECT_FALSE(float_reg_type.IsBoolean());
+ EXPECT_FALSE(float_reg_type.IsByte());
+ EXPECT_FALSE(float_reg_type.IsChar());
+ EXPECT_FALSE(float_reg_type.IsShort());
+ EXPECT_FALSE(float_reg_type.IsInteger());
+ EXPECT_FALSE(float_reg_type.IsLong());
+ EXPECT_TRUE(float_reg_type.IsFloat());
+ EXPECT_FALSE(float_reg_type.IsDouble());
+ EXPECT_FALSE(float_reg_type.IsReference());
+ EXPECT_FALSE(float_reg_type.IsLowHalf());
+ EXPECT_FALSE(float_reg_type.IsHighHalf());
+ EXPECT_FALSE(float_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(float_reg_type.IsReferenceTypes());
+ EXPECT_TRUE(float_reg_type.IsCategory1Types());
+ EXPECT_FALSE(float_reg_type.IsCategory2Types());
+ EXPECT_FALSE(float_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(float_reg_type.IsByteTypes());
+ EXPECT_FALSE(float_reg_type.IsShortTypes());
+ EXPECT_FALSE(float_reg_type.IsCharTypes());
+ EXPECT_FALSE(float_reg_type.IsIntegralTypes());
+ EXPECT_TRUE(float_reg_type.IsFloatTypes());
+ EXPECT_FALSE(float_reg_type.IsLongTypes());
+ EXPECT_FALSE(float_reg_type.IsDoubleTypes());
+ EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
+
+ const RegType& double_reg_type = cache.DoubleLo();
+ EXPECT_FALSE(double_reg_type.IsUndefined());
+ EXPECT_FALSE(double_reg_type.IsConflict());
+ EXPECT_FALSE(double_reg_type.IsZero());
+ EXPECT_FALSE(double_reg_type.IsOne());
+ EXPECT_FALSE(double_reg_type.IsLongConstant());
+ EXPECT_FALSE(double_reg_type.IsBoolean());
+ EXPECT_FALSE(double_reg_type.IsByte());
+ EXPECT_FALSE(double_reg_type.IsChar());
+ EXPECT_FALSE(double_reg_type.IsShort());
+ EXPECT_FALSE(double_reg_type.IsInteger());
+ EXPECT_FALSE(double_reg_type.IsLong());
+ EXPECT_FALSE(double_reg_type.IsFloat());
+ EXPECT_TRUE(double_reg_type.IsDouble());
+ EXPECT_FALSE(double_reg_type.IsReference());
+ EXPECT_TRUE(double_reg_type.IsLowHalf());
+ EXPECT_FALSE(double_reg_type.IsHighHalf());
+ EXPECT_TRUE(double_reg_type.IsLongOrDoubleTypes());
+ EXPECT_FALSE(double_reg_type.IsReferenceTypes());
+ EXPECT_FALSE(double_reg_type.IsCategory1Types());
+ EXPECT_TRUE(double_reg_type.IsCategory2Types());
+ EXPECT_FALSE(double_reg_type.IsBooleanTypes());
+ EXPECT_FALSE(double_reg_type.IsByteTypes());
+ EXPECT_FALSE(double_reg_type.IsShortTypes());
+ EXPECT_FALSE(double_reg_type.IsCharTypes());
+ EXPECT_FALSE(double_reg_type.IsIntegralTypes());
+ EXPECT_FALSE(double_reg_type.IsFloatTypes());
+ EXPECT_FALSE(double_reg_type.IsLongTypes());
+ EXPECT_TRUE(double_reg_type.IsDoubleTypes());
+ EXPECT_FALSE(double_reg_type.IsArrayIndexTypes());
+ EXPECT_FALSE(double_reg_type.IsNonZeroReferenceTypes());
+}
+
+class RegTypeReferenceTest : public CommonTest {};
+
+TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
+ // Tests matching precisions. A reference type that was created precise doesn't
+ // match the one that is imprecise.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& imprecise_obj = cache.JavaLangObject(false);
+ const RegType& precise_obj = cache.JavaLangObject(true);
+ const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+
+ EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
+ EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
+ EXPECT_FALSE(imprecise_obj.Equals(precise_obj_2));
+}
+
+TEST_F(RegTypeReferenceTest, UnresolvedType) {
+ // Tests creating unresolved types: a miss the first time we ask the cache,
+ // then a hit the second time.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
+ EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
+
+ const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
+
+ const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
+ EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
+ EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
+}
+
+TEST_F(RegTypeReferenceTest, UnresolvedUninitializedType) {
+ // Tests creating uninitialized types from unresolved types.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
+ const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ EXPECT_TRUE(ref_type_0.Equals(ref_type));
+ // Create an uninitialized type from this unresolved type.
+ const RegType& unresolved_uninitialized = cache.Uninitialized(ref_type, 1101ull);
+ EXPECT_TRUE(unresolved_uninitialized.IsUnresolvedAndUninitializedReference());
+ EXPECT_TRUE(unresolved_uninitialized.IsUninitializedTypes());
+ EXPECT_TRUE(unresolved_uninitialized.IsNonZeroReferenceTypes());
+ // Create an uninitialized type from this unresolved type with a different PC.
+ const RegType& unresolved_uninitialized_1 = cache.Uninitialized(ref_type, 1102ull);
+ EXPECT_TRUE(unresolved_uninitialized_1.IsUnresolvedAndUninitializedReference());
+ EXPECT_FALSE(unresolved_uninitialized.Equals(unresolved_uninitialized_1));
+ // Create an uninitialized type from this unresolved type with the same PC.
+ const RegType& unresolved_uninitialized_2 = cache.Uninitialized(ref_type, 1101ull);
+ EXPECT_TRUE(unresolved_uninitialized.Equals(unresolved_uninitialized_2));
+}
+
+TEST_F(RegTypeReferenceTest, Dump) {
+ // Tests types for proper Dump messages.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+ const RegType& resolved_ref = cache.JavaLangString();
+ const RegType& resolved_uninitialized = cache.Uninitialized(resolved_ref, 10);
+ const RegType& unresolved_uninitialized = cache.Uninitialized(unresolved_ref, 12);
+ const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+
+ std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
+ EXPECT_EQ(expected, unresolved_ref.Dump());
+ expected = "Precise Reference: java.lang.String";
+ EXPECT_EQ(expected, resolved_ref.Dump());
+ expected = "Uninitialized Reference: java.lang.String Allocation PC: 10";
+ EXPECT_EQ(expected, resolved_uninitialized.Dump());
+ expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12";
+ EXPECT_EQ(expected, unresolved_uninitialized.Dump());
+ expected = "UnresolvedMergedReferences(Unresolved Reference: java.lang.DoesNotExist, Unresolved Reference: java.lang.DoesNotExistEither)";
+ EXPECT_EQ(expected, unresolved_merged.Dump());
+}
+
+TEST_F(RegTypeReferenceTest, JavalangString) {
+ // Add a class to the cache, then look it up again and make sure it is a hit
+ // the second time. Also check that the JavaLangString method and
+ // FromDescriptor return the same entry. The String class is final.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& ref_type = cache.JavaLangString();
+ const RegType& ref_type_2 = cache.JavaLangString();
+ const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+
+ EXPECT_TRUE(ref_type.Equals(ref_type_2));
+ EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
+ EXPECT_TRUE(ref_type.IsPreciseReference());
+
+ // Create an uninitialized type out of this:
+ const RegType& ref_type_uninitialized = cache.Uninitialized(ref_type, 0110ull);
+ EXPECT_TRUE(ref_type_uninitialized.IsUninitializedReference());
+ EXPECT_FALSE(ref_type_uninitialized.IsUnresolvedAndUninitializedReference());
+}
+
+TEST_F(RegTypeReferenceTest, JavalangObject) {
+ // Add a class to the cache, then look it up again and make sure it is a hit
+ // the second time. Also check that the JavaLangObject method and
+ // FromDescriptor return the same entry. The Object class is not final.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache(true);
+ const RegType& ref_type = cache.JavaLangObject(true);
+ const RegType& ref_type_2 = cache.JavaLangObject(true);
+ const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+
+ EXPECT_TRUE(ref_type.Equals(ref_type_2));
+ EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
+ EXPECT_EQ(ref_type.GetId(), ref_type_3.GetId());
+}
+
+TEST_F(RegTypeReferenceTest, Merging) {
+ // Tests merging logic.
+ // Merging String and Object gives Object, the least upper bound (LUB).
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache_new(true);
+ const RegType& string = cache_new.JavaLangString();
+ const RegType& object = cache_new.JavaLangObject(true);
+ EXPECT_TRUE(string.Merge(object, &cache_new).IsJavaLangObject());
+ // Merge two unresolved types.
+ const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
+ const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+ EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
+
+ const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+ EXPECT_TRUE(merged.IsUnresolvedMergedReference());
+ RegType& merged_nonconst = const_cast<RegType&>(merged);
+
+ std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+ EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
+ EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
+}
+
+TEST_F(RegTypeTest, ConstPrecision) {
+ // Tests that precise and imprecise constants of the same value do not match.
+ ScopedObjectAccess soa(Thread::Current());
+ RegTypeCache cache_new(true);
+ const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+ const RegType& precise_const = cache_new.FromCat1Const(10, true);
+
+ EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
+ EXPECT_TRUE(precise_const.IsPreciseConstant());
+ EXPECT_FALSE(imprecise_const.Equals(precise_const));
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
new file mode 100644
index 0000000..157e136
--- /dev/null
+++ b/runtime/verifier/register_line-inl.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
+#define ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
+
+#include "register_line.h"
+#include "method_verifier.h"
+
+namespace art {
+namespace verifier {
+
+inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+ // The register index was validated during the static pass, so we don't need to check it here.
+ DCHECK_LT(vsrc, num_regs_);
+ return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
+}
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_REGISTER_LINE_INL_H_
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
new file mode 100644
index 0000000..3a2145b
--- /dev/null
+++ b/runtime/verifier/register_line.cc
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_line.h"
+
+#include "dex_instruction-inl.h"
+#include "method_verifier.h"
+#include "register_line-inl.h"
+
+namespace art {
+namespace verifier {
+
+bool RegisterLine::CheckConstructorReturn() const {
+ for (size_t i = 0; i < num_regs_; i++) {
+ if (GetRegisterType(i).IsUninitializedThisReference() ||
+ GetRegisterType(i).IsUnresolvedAndUninitializedThisReference()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+ << "Constructor returning without calling superclass constructor";
+ return false;
+ }
+ }
+ return true;
+}
+
+bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+ DCHECK_LT(vdst, num_regs_);
+ if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Expected category1 register type not '"
+ << new_type << "'";
+ return false;
+ } else if (new_type.IsConflict()) { // should only be set during a merge
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
+ return false;
+ } else if (verifier_->CanLoadClasses() && !Runtime::Current()->IsCompiler() &&
+ new_type.IsUnresolvedTypes()) {
+ // Unresolvable classes at runtime are bad and marked as a rewrite error.
+ verifier_->Fail(VERIFY_ERROR_NO_CLASS) << "Set register to unresolved class '"
+ << new_type << "' at runtime";
+ return false;
+ } else {
+ line_[vdst] = new_type.GetId();
+ }
+ // Clear the monitor entry bits for this register.
+ ClearAllRegToLockDepths(vdst);
+ return true;
+}
+
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
+ const RegType& new_type2) {
+ DCHECK_LT(vdst, num_regs_);
+ if (!new_type1.CheckWidePair(new_type2)) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
+ << new_type1 << "' '" << new_type2 << "'";
+ return false;
+ } else {
+ line_[vdst] = new_type1.GetId();
+ line_[vdst + 1] = new_type2.GetId();
+ }
+ // Clear the monitor entry bits for this register.
+ ClearAllRegToLockDepths(vdst);
+ ClearAllRegToLockDepths(vdst + 1);
+ return true;
+}
+
+void RegisterLine::SetResultTypeToUnknown() {
+ result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
+ result_[1] = result_[0];
+}
+
+void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+ DCHECK(!new_type.IsLowHalf());
+ DCHECK(!new_type.IsHighHalf());
+ result_[0] = new_type.GetId();
+ result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
+}
+
+void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
+ const RegType& new_type2) {
+ DCHECK(new_type1.CheckWidePair(new_type2));
+ result_[0] = new_type1.GetId();
+ result_[1] = new_type2.GetId();
+}
+
+const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+ const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+ if (args_count < 1) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+ return verifier_->GetRegTypeCache()->Conflict();
+ }
+ /* get the element type of the array held in vsrc */
+ const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ const RegType& this_type = GetRegisterType(this_reg);
+ if (!this_type.IsReferenceTypes()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
+ << this_reg << " (type=" << this_type << ")";
+ return verifier_->GetRegTypeCache()->Conflict();
+ }
+ return this_type;
+}
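+
+// Example (illustrative): for "invoke-virtual {v4, v5}, LFoo;->bar(I)V" the
+// argument count vA is 2 and the first argument register vC is v4, so the
+// 'this' reference is read from v4. A zero argument count or a non-reference
+// type in that register is rejected above with a hard failure.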
+
+bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
+ const RegType& check_type) {
+ // Verify the src register type against the check type refining the type of the register
+ const RegType& src_type = GetRegisterType(vsrc);
+ if (!(check_type.IsAssignableFrom(src_type))) {
+ // Hard fail if one of the types is primitive, since they are concretely known.
+ enum VerifyError fail_type = (!check_type.IsNonZeroReferenceTypes() ||
+ !src_type.IsNonZeroReferenceTypes())
+ ? VERIFY_ERROR_BAD_CLASS_HARD
+ : VERIFY_ERROR_BAD_CLASS_SOFT;
+ verifier_->Fail(fail_type) << "register v" << vsrc << " has type "
+ << src_type << " but expected " << check_type;
+ return false;
+ }
+ if (check_type.IsLowHalf()) {
+ const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ if (!src_type.CheckWidePair(src_type_h)) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+ << src_type << "/" << src_type_h;
+ return false;
+ }
+ }
+ // The register at vsrc has a defined type: we know its lowest upper bound, but this is less
+ // precise than the subtype actually held in vsrc, so leave it alone for reference types. For
+ // primitive types a defined type is as precise as we can get; for constant types we may wish
+ // to refine them, but unfortunately constant propagation has rendered this useless.
+ return true;
+}
+
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
+ const RegType& check_type2) {
+ DCHECK(check_type1.CheckWidePair(check_type2));
+ // Verify the src register type against the check type refining the type of the register
+ const RegType& src_type = GetRegisterType(vsrc);
+ if (!check_type1.IsAssignableFrom(src_type)) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
+ << " but expected " << check_type1;
+ return false;
+ }
+ const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ if (!src_type.CheckWidePair(src_type_h)) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+ << src_type << "/" << src_type_h;
+ return false;
+ }
+ // The register at vsrc has a defined type: we know its lowest upper bound, but this is less
+ // precise than the subtype actually held in vsrc, so leave it alone for reference types. For
+ // primitive types a defined type is as precise as we can get; for constant types we may wish
+ // to refine them, but unfortunately constant propagation has rendered this useless.
+ return true;
+}
+
+void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+ DCHECK(uninit_type.IsUninitializedTypes());
+ const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+ size_t changed = 0;
+ for (size_t i = 0; i < num_regs_; i++) {
+ if (GetRegisterType(i).Equals(uninit_type)) {
+ line_[i] = init_type.GetId();
+ changed++;
+ }
+ }
+ DCHECK_GT(changed, 0u);
+}
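+
+// Example (illustrative): after "new-instance v0, LFoo;" a copy such as
+// "move-object v1, v0" leaves both registers holding the same uninitialized
+// type, tagged with the allocation pc. Verifying
+// "invoke-direct {v0}, LFoo;-><init>()V" triggers this method, which rewrites
+// every copy (v0 and v1) to the initialized Foo reference in one pass.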
+
+std::string RegisterLine::Dump() const {
+ std::string result;
+ for (size_t i = 0; i < num_regs_; i++) {
+ result += StringPrintf("%zu:[", i);
+ result += GetRegisterType(i).Dump();
+ result += "],";
+ }
+ typedef std::deque<uint32_t>::const_iterator It; // TODO: C++0x auto
+ for (It it = monitors_.begin(), end = monitors_.end(); it != end; ++it) {
+ result += StringPrintf("{%d},", *it);
+ }
+ return result;
+}
+
+void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+ for (size_t i = 0; i < num_regs_; i++) {
+ if (GetRegisterType(i).Equals(uninit_type)) {
+ line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
+ ClearAllRegToLockDepths(i);
+ }
+ }
+}
+
+void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
+ DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
+ const RegType& type = GetRegisterType(vsrc);
+ if (!SetRegisterType(vdst, type)) {
+ return;
+ }
+ if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
+ (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
+ << " cat=" << static_cast<int>(cat);
+ } else if (cat == kTypeCategoryRef) {
+ CopyRegToLockDepth(vdst, vsrc);
+ }
+}
+
+void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
+ const RegType& type_l = GetRegisterType(vsrc);
+ const RegType& type_h = GetRegisterType(vsrc + 1);
+
+ if (!type_l.CheckWidePair(type_h)) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
+ << " type=" << type_l << "/" << type_h;
+ } else {
+ SetRegisterTypeWide(vdst, type_l, type_h);
+ }
+}
+
+void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
+ const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ if ((!is_reference && !type.IsCategory1Types()) ||
+ (is_reference && !type.IsReferenceTypes())) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "copyRes1 v" << vdst << "<- result0" << " type=" << type;
+ } else {
+ DCHECK(verifier_->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
+ SetRegisterType(vdst, type);
+ result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
+ }
+}
+
+/*
+ * Implement "move-result-wide". Copy the category-2 value from the result
+ * register to another register, and reset the result register.
+ */
+void RegisterLine::CopyResultRegister2(uint32_t vdst) {
+ const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+ if (!type_l.IsCategory2Types()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "copyRes2 v" << vdst << "<- result0" << " type=" << type_l;
+ } else {
+ DCHECK(type_l.CheckWidePair(type_h)); // Set should never allow this case
+ SetRegisterTypeWide(vdst, type_l, type_h); // also sets the high
+ result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
+ result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
+
+ }
+}
+
+void RegisterLine::CheckUnaryOp(const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type) {
+ if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
+ SetRegisterType(inst->VRegA_12x(), dst_type);
+ }
+}
+
+void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1, const RegType& src_type2) {
+ if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
+ SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+ }
+}
+
+void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type) {
+ if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
+ SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+ }
+}
+
+void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2) {
+ if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
+ SetRegisterType(inst->VRegA_12x(), dst_type);
+ }
+}
+
+void RegisterLine::CheckBinaryOp(const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2,
+ bool check_boolean_op) {
+ const uint32_t vregB = inst->VRegB_23x();
+ const uint32_t vregC = inst->VRegC_23x();
+ if (VerifyRegisterType(vregB, src_type1) &&
+ VerifyRegisterType(vregC, src_type2)) {
+ if (check_boolean_op) {
+ DCHECK(dst_type.IsInteger());
+ if (GetRegisterType(vregB).IsBooleanTypes() &&
+ GetRegisterType(vregC).IsBooleanTypes()) {
+ SetRegisterType(inst->VRegA_23x(), verifier_->GetRegTypeCache()->Boolean());
+ return;
+ }
+ }
+ SetRegisterType(inst->VRegA_23x(), dst_type);
+ }
+}
+
+void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2) {
+ if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
+ VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
+ SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
+ }
+}
+
+void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type) {
+ if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
+ VerifyRegisterType(inst->VRegC_23x(), int_type)) {
+ SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
+ }
+}
+
+void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
+ const RegType& dst_type, const RegType& src_type1,
+ const RegType& src_type2, bool check_boolean_op) {
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterType(vregA, src_type1) &&
+ VerifyRegisterType(vregB, src_type2)) {
+ if (check_boolean_op) {
+ DCHECK(dst_type.IsInteger());
+ if (GetRegisterType(vregA).IsBooleanTypes() &&
+ GetRegisterType(vregB).IsBooleanTypes()) {
+ SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+ return;
+ }
+ }
+ SetRegisterType(vregA, dst_type);
+ }
+}
+
+void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2) {
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
+ VerifyRegisterTypeWide(vregB, src_type2_1, src_type2_2)) {
+ SetRegisterTypeWide(vregA, dst_type1, dst_type2);
+ }
+}
+
+void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type) {
+ const uint32_t vregA = inst->VRegA_12x();
+ const uint32_t vregB = inst->VRegB_12x();
+ if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
+ VerifyRegisterType(vregB, int_type)) {
+ SetRegisterTypeWide(vregA, long_lo_type, long_hi_type);
+ }
+}
+
+void RegisterLine::CheckLiteralOp(const Instruction* inst,
+ const RegType& dst_type, const RegType& src_type,
+ bool check_boolean_op, bool is_lit16) {
+ const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
+ const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
+ if (VerifyRegisterType(vregB, src_type)) {
+ if (check_boolean_op) {
+ DCHECK(dst_type.IsInteger());
+ /* check vB with the call, then check the constant manually */
+ const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
+ if (GetRegisterType(vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
+ SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+ return;
+ }
+ }
+ SetRegisterType(vregA, dst_type);
+ }
+}
+
+void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
+ const RegType& reg_type = GetRegisterType(reg_idx);
+ if (!reg_type.IsReferenceTypes()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
+ } else if (monitors_.size() >= 32) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: " << monitors_.size();
+ } else {
+ SetRegToLockDepth(reg_idx, monitors_.size());
+ monitors_.push_back(insn_idx);
+ }
+}
+
+void RegisterLine::PopMonitor(uint32_t reg_idx) {
+ const RegType& reg_type = GetRegisterType(reg_idx);
+ if (!reg_type.IsReferenceTypes()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
+ } else if (monitors_.empty()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
+ } else {
+ monitors_.pop_back();
+ if (!IsSetLockDepth(reg_idx, monitors_.size())) {
+ // Bug 3215458: locks and unlocks are on objects. If that object is a literal, then before
+ // dex format "036" the constant collector may create unlocks on the same object but
+ // referenced via different registers.
+ ((verifier_->DexFileVersion() >= 36) ? verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+ : verifier_->LogVerifyInfo())
+ << "monitor-exit not unlocking the top of the monitor stack";
+ } else {
+ // Record the register was unlocked
+ ClearRegToLockDepth(reg_idx, monitors_.size());
+ }
+ }
+}
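+
+// Example (illustrative): for the well-nested sequence
+//   monitor-enter v1   // PushMonitor: v1 tagged at depth 0, pc pushed
+//   monitor-enter v2   // PushMonitor: v2 tagged at depth 1
+//   monitor-exit v2    // PopMonitor: pops depth 1, which v2 is tagged with
+//   monitor-exit v1    // PopMonitor: pops depth 0
+// exits must unwind in LIFO order; unlocking a register that is not tagged
+// with the top depth is flagged (or merely logged for pre-"036" dex files).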
+
+bool RegisterLine::VerifyMonitorStackEmpty() {
+ if (MonitorStackDepth() != 0) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
+ return false;
+ } else {
+ return true;
+ }
+}
+
+bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
+ bool changed = false;
+ CHECK(NULL != incoming_line);
+ CHECK(NULL != line_.get());
+ for (size_t idx = 0; idx < num_regs_; idx++) {
+ if (line_[idx] != incoming_line->line_[idx]) {
+ const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+ const RegType& cur_type = GetRegisterType(idx);
+ const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+ changed = changed || !cur_type.Equals(new_type);
+ line_[idx] = new_type.GetId();
+ }
+ }
+ if (monitors_.size() != incoming_line->monitors_.size()) {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "mismatched stack depths (depth="
+ << MonitorStackDepth() << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
+ } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
+ for (uint32_t idx = 0; idx < num_regs_; idx++) {
+ size_t depths = reg_to_lock_depths_.count(idx);
+ size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
+ if (depths != incoming_depths) {
+ if (depths == 0 || incoming_depths == 0) {
+ reg_to_lock_depths_.erase(idx);
+ } else {
+ verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "mismatched stack depths for register v" << idx
+ << ": " << depths << " != " << incoming_depths;
+ break;
+ }
+ }
+ }
+ }
+ return changed;
+}
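+
+// Example (illustrative): at a control-flow join each register is merged
+// pairwise, so a register holding String on one path and Object on the other
+// widens to Object (their join); a true 'changed' result tells the verifier
+// the merged line differs and the target must be revisited.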
+
+void RegisterLine::WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes) {
+ for (size_t i = 0; i < num_regs_; i += 8) {
+ uint8_t val = 0;
+ for (size_t j = 0; j < 8 && (i + j) < num_regs_; j++) {
+ // Note: we write 1 for a Reference but not for Null
+ if (GetRegisterType(i + j).IsNonZeroReferenceTypes()) {
+ val |= 1 << j;
+ }
+ }
+ if ((i / 8) >= max_bytes) {
+ DCHECK_EQ(0, val);
+ continue;
+ }
+ DCHECK_LT(i / 8, max_bytes) << "val=" << static_cast<uint32_t>(val);
+ data.push_back(val);
+ }
+}
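+
+// Example (illustrative): with num_regs_ == 10 and non-zero references only
+// in v1 and v3, two bytes are emitted: 0b00001010 (bits 1 and 3 set) followed
+// by 0x00 -- register i maps to bit (i % 8) of byte (i / 8).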
+
+std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ os << rhs.Dump();
+ return os;
+}
+
+} // namespace verifier
+} // namespace art
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
new file mode 100644
index 0000000..5f17049
--- /dev/null
+++ b/runtime/verifier/register_line.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_VERIFIER_REGISTER_LINE_H_
+#define ART_SRC_VERIFIER_REGISTER_LINE_H_
+
+#include <deque>
+#include <vector>
+
+#include "dex_instruction.h"
+#include "reg_type.h"
+#include "safe_map.h"
+#include "UniquePtr.h"
+
+namespace art {
+namespace verifier {
+
+class MethodVerifier;
+
+/*
+ * Register type categories, for type checking.
+ *
+ * The spec says category 1 includes boolean, byte, char, short, int, float, reference, and
+ * returnAddress. Category 2 includes long and double.
+ *
+ * We treat object references separately, so we have "category1nr". We don't support jsr/ret, so
+ * there is no "returnAddress" type.
+ */
+enum TypeCategory {
+ kTypeCategoryUnknown = 0,
+ kTypeCategory1nr = 1, // boolean, byte, char, short, int, float
+ kTypeCategory2 = 2, // long, double
+ kTypeCategoryRef = 3, // object reference
+};
+
+// During verification, we associate one of these with every "interesting" instruction. We track
+// the status of all registers, and (if the method has any monitor-enter instructions) maintain a
+// stack of entered monitors (identified by code unit offset).
+class RegisterLine {
+ public:
+ RegisterLine(size_t num_regs, MethodVerifier* verifier)
+ : line_(new uint16_t[num_regs]),
+ verifier_(verifier),
+ num_regs_(num_regs) {
+ memset(line_.get(), 0, num_regs_ * sizeof(uint16_t));
+ SetResultTypeToUnknown();
+ }
+
+ // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
+ void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
+ // copies both halves of the register.
+ void CopyRegister2(uint32_t vdst, uint32_t vsrc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Implement "move-result". Copy the category-1 value from the result register to another
+ // register, and reset the result register.
+ void CopyResultRegister1(uint32_t vdst, bool is_reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Implement "move-result-wide". Copy the category-2 value from the result register to another
+ // register, and reset the result register.
+ void CopyResultRegister2(uint32_t vdst)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Set the invisible result register to unknown
+ void SetResultTypeToUnknown() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Set the type of register N, verifying that the register is valid. "new_type" must be a
+ // category-1 type; the two halves of a 64-bit value are set together via SetRegisterTypeWide.
+ // The register index was validated during the static pass, so we don't need to check it here.
+ bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /* Set the type of the "result" register. */
+ void SetResultRegisterType(const RegType& new_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get the type of register vsrc.
+ const RegType& GetRegisterType(uint32_t vsrc) const;
+
+ bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CopyFromLine(const RegisterLine* src) {
+ DCHECK_EQ(num_regs_, src->num_regs_);
+ memcpy(line_.get(), src->line_.get(), num_regs_ * sizeof(uint16_t));
+ monitors_ = src->monitors_;
+ reg_to_lock_depths_ = src->reg_to_lock_depths_;
+ }
+
+ std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void FillWithGarbage() {
+ memset(line_.get(), 0xf1, num_regs_ * sizeof(uint16_t));
+ while (!monitors_.empty()) {
+ monitors_.pop_back();
+ }
+ reg_to_lock_depths_.clear();
+ }
+
+ /*
+ * We're creating a new instance of class C at address A. Any registers holding instances
+ * previously created at address A must be initialized by now. If not, we mark them as "conflict"
+ * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
+ * the new ones at the same time).
+ */
+ void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Update all registers holding "uninit_type" to instead hold the corresponding initialized
+ * reference type. This is called when an appropriate constructor is invoked -- all copies of
+ * the reference must be marked as initialized.
+ */
+ void MarkRefsAsInitialized(const RegType& uninit_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Check constraints on constructor return. Specifically, make sure that the "this" argument got
+ * initialized.
+ * The "this" argument to <init> uses code offset kUninitThisArgAddr, which puts it at the start
+ * of the list in slot 0. If we see a register with an uninitialized slot 0 reference, we know it
+ * somehow didn't get initialized.
+ */
+ bool CheckConstructorReturn() const;
+
+ // Compare two register lines. Returns 0 if they match.
+ // Using this for a sort is unwise, since the value can change based on machine endianness.
+ int CompareLine(const RegisterLine* line2) const {
+ DCHECK(monitors_ == line2->monitors_);
+ // TODO: DCHECK(reg_to_lock_depths_ == line2->reg_to_lock_depths_);
+ return memcmp(line_.get(), line2->line_.get(), num_regs_ * sizeof(uint16_t));
+ }
+
+ size_t NumRegs() const {
+ return num_regs_;
+ }
+
+ /*
+ * Get the "this" pointer from a non-static method invocation. This returns the RegType so the
+ * caller can decide whether it needs the reference to be initialized or not. (Can also return
+ * kRegTypeZero if the reference can only be zero at this point.)
+ *
+ * The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
+ * versions. We just need to make sure vA is >= 1 and then return vC.
+ */
+ const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Verify types for a simple two-register instruction (e.g. "neg-int").
+ * "dst_type" is stored into vA, and "src_type" is verified against vB.
+ */
+ void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
+ const RegType& src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckUnaryOpWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1, const RegType& src_type2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckUnaryOpToWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckUnaryOpFromWide(const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Verify types for a simple three-register instruction (e.g. "add-int").
+ * "dst_type" is stored into vA, and "src_type1"/"src_type2" are verified
+ * against vB/vC.
+ */
+ void CheckBinaryOp(const Instruction* inst,
+ const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+ bool check_boolean_op)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckBinaryOpWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckBinaryOpWideShift(const Instruction* inst,
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
+ * Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
+ * are verified against vA/vB, then "dst_type" is stored into vA.
+ */
+ void CheckBinaryOp2addr(const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type1, const RegType& src_type2,
+ bool check_boolean_op)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckBinaryOp2addrWide(const Instruction* inst,
+ const RegType& dst_type1, const RegType& dst_type2,
+ const RegType& src_type1_1, const RegType& src_type1_2,
+ const RegType& src_type2_1, const RegType& src_type2_2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CheckBinaryOp2addrWideShift(const Instruction* inst,
+ const RegType& long_lo_type, const RegType& long_hi_type,
+ const RegType& int_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
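+ // "add-int/2addr" would use CheckBinaryOp2addr with the same types as "add-int"; the only
+ // difference is that vA serves as both the first source and the destination.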
+
+ /*
+ * Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
+ * "dst_type" is stored into vA, and "src_type" is verified against vB.
+ *
+ * If "check_boolean_op" is set, we use the constant value in vC.
+ */
+ void CheckLiteralOp(const Instruction* inst,
+ const RegType& dst_type, const RegType& src_type,
+ bool check_boolean_op, bool is_lit16)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
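+ // For example, "add-int/lit8 vAA, vBB, #+CC" might be checked as (a sketch):
+ //   CheckLiteralOp(inst, reg_types.Integer(), reg_types.Integer(),
+ //                  false /* check_boolean_op */, false /* is_lit16 */);
+ // while "and-int/lit16" would pass check_boolean_op = true and is_lit16 = true.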
+
+ // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
+ void PushMonitor(uint32_t reg_idx, int32_t insn_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Verify/pop monitor from the monitor stack, ensuring that we believe the monitor is locked.
+ void PopMonitor(uint32_t reg_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
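+ // A balanced pairing sketch: for "monitor-enter v5" at dex pc 0x10 and a matching
+ // "monitor-exit v5", the verifier calls PushMonitor(5, 0x10) and later PopMonitor(5);
+ // popping a register other than the one holding the innermost lock is a verification failure.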
+
+ // Returns the number of currently held monitors (the depth of the monitor stack).
+ size_t MonitorStackDepth() const {
+ return monitors_.size();
+ }
+
+ // We expect no monitors to be held at certain points, such as when a method returns. Verify
+ // the stack is empty, failing and returning false if not.
+ bool VerifyMonitorStackEmpty();
+
+ bool MergeRegisters(const RegisterLine* incoming_line)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Returns the index of the highest register at or above "max_ref_reg" holding a non-zero
+ // reference, or "max_ref_reg" itself if there is none.
+ size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) {
+ // A negative value (wrapped into size_t) means "start scanning from register 0".
+ size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
+ for (; i < num_regs_; i++) {
+ if (GetRegisterType(i).IsNonZeroReferenceTypes()) {
+ max_ref_reg = i;
+ }
+ }
+ return max_ref_reg;
+ }
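+ // E.g. with num_regs_ == 6 and non-zero references only in v1 and v4,
+ // GetMaxNonZeroReferenceReg(0) scans v0..v5 and returns 4.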
+
+ // Write a bit into "data" for each register, set if that register holds a non-null reference.
+ void WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes);
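+ // E.g. assuming the LSB-first packing used by the implementation, references only in v1 and
+ // v9 would produce the bytes 0x02, 0x02 (eight registers per byte).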
+
+ // Returns the number of outstanding monitor-enters (same as MonitorStackDepth).
+ size_t GetMonitorEnterCount() {
+ return monitors_.size();
+ }
+
+ // Returns the dex pc at which the i'th outstanding monitor-enter occurred.
+ uint32_t GetMonitorEnterDexPc(size_t i) {
+ return monitors_[i];
+ }
+
+ private:
+ void CopyRegToLockDepth(size_t dst, size_t src) {
+ SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(src);
+ if (it != reg_to_lock_depths_.end()) {
+ reg_to_lock_depths_.Put(dst, it->second);
+ }
+ }
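+ // Intended for register copies (e.g. move-object): the destination alias inherits the
+ // source register's lock depths so monitor checks still see it as locked.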
+
+ bool IsSetLockDepth(size_t reg, size_t depth) {
+ SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ if (it != reg_to_lock_depths_.end()) {
+ return (it->second & (1 << depth)) != 0;
+ } else {
+ return false;
+ }
+ }
+
+ void SetRegToLockDepth(size_t reg, size_t depth) {
+ CHECK_LT(depth, 32u);
+ DCHECK(!IsSetLockDepth(reg, depth));
+ SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ if (it == reg_to_lock_depths_.end()) {
+ reg_to_lock_depths_.Put(reg, 1 << depth);
+ } else {
+ it->second |= (1 << depth);
+ }
+ }
+
+ void ClearRegToLockDepth(size_t reg, size_t depth) {
+ CHECK_LT(depth, 32u);
+ DCHECK(IsSetLockDepth(reg, depth));
+ SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ DCHECK(it != reg_to_lock_depths_.end());
+ uint32_t depths = it->second ^ (1 << depth);
+ if (depths != 0) {
+ it->second = depths;
+ } else {
+ reg_to_lock_depths_.erase(it);
+ }
+ }
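+ // Worked example: monitor-enters on v5 at depths 0 and 1 set reg_to_lock_depths_[5] to 0b11;
+ // ClearRegToLockDepth(5, 1) leaves 0b01, and clearing depth 0 then erases the entry.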
+
+ void ClearAllRegToLockDepths(size_t reg) {
+ reg_to_lock_depths_.erase(reg);
+ }
+
+ // Storage for the result register's type, valid after an invocation
+ uint16_t result_[2];
+
+ // An array of RegType Ids associated with each dex register
+ UniquePtr<uint16_t[]> line_;
+
+ // Back link to the verifier
+ MethodVerifier* verifier_;
+
+ // Length of line_, i.e. the number of registers in the method.
+ const uint32_t num_regs_;
+ // A stack of monitor enter locations
+ std::deque<uint32_t> monitors_;
+ // A map from register to a bit vector of indices into the monitors_ stack. As we pop the
+ // monitor stack we verify that monitor-enter/exit are correctly nested. That is, if there was
+ // a monitor-enter on v5 and then on v6, we expect the monitor-exits to be on v6 first and
+ // then on v5.
+ SafeMap<uint32_t, uint32_t> reg_to_lock_depths_;
+};
+std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs);
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_SRC_VERIFIER_REGISTER_LINE_H_