 compiler/optimizing/builder.cc               |  25 ++-
 compiler/optimizing/code_generator_arm.cc    |  97 ++++++++++-
 compiler/optimizing/code_generator_arm64.cc  |   1 +
 compiler/optimizing/code_generator_x86.cc    | 106 ++++++++++-
 compiler/optimizing/code_generator_x86_64.cc | 105 ++++++++++-
 compiler/optimizing/nodes.h                  |  40 ++++
 test/422-instanceof/expected.txt             |   0
 test/422-instanceof/info.txt                 |   1 +
 test/422-instanceof/src/Main.java            |  70 +++++++
 test/Android.run-test.mk                     |   1 +
 10 files changed, 437 insertions(+), 9 deletions(-)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index fc7333f..05213a1 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1260,7 +1260,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(instruction.VRegB_21c(), is_referrers_class, dex_offset));
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1282,6 +1282,29 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::INSTANCE_OF: {
+ uint16_t type_index = instruction.VRegC_22c();
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_, 1);
+ temps.Add(cls);
+ current_block_->AddInstruction(
+ new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
+ UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
default:
return false;
}
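
The new INSTANCE_OF case decodes the 22c-format instruction (instance-of vA, vB, type@CCCC): vB holds the object reference, CCCC is the type index, and vA receives the boolean result. The builder returns false (falling back to the other compiler) when the type cannot be accessed without checks; otherwise it materializes the class with an HLoadClass, pins it in a temporary, and feeds both into the new HTypeCheck node. For reference, a minimal Java method that dex lowers to a single instance-of instruction (register numbers illustrative):

    // Compiles to roughly: instance-of v0, v1, type@<String>; return v0
    static boolean isString(Object o) {
        return o instanceof String;
    }
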
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 91b28c4..8e6f8ea 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -212,8 +212,9 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
codegen->RestoreLiveRegisters(locations);
@@ -266,6 +267,49 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
+class TypeCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
+};
+
#undef __
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
@@ -2577,5 +2621,54 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
+void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeARM* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ cmp(out, ShifterOperand(cls));
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ b(&zero, NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, Location::RegisterLocation(out));
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ }
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace arm
} // namespace art
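
The ARM fast path implements a three-way decision: a null object yields 0, an exact class match yields 1, and for non-final classes a mismatch falls through to the runtime helper via TypeCheckSlowPathARM. A hedged Java sketch of the equivalent logic, with Class.isInstance standing in for pInstanceofNonTrivial (the helper itself is not part of this patch):

    static boolean typeCheck(Object obj, Class<?> cls, boolean classIsFinal) {
        if (obj == null) {
            return false;               // the `zero` label
        }
        if (obj.getClass() == cls) {
            return true;                // classes equal: LoadImmediate(out, 1)
        }
        if (classIsFinal) {
            return false;               // a final class has no subclasses
        }
        return cls.isInstance(obj);     // slow path: pInstanceofNonTrivial
    }
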
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e84346b..4fe954d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -550,6 +550,7 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
+ M(TypeCheck) \
M(TypeConversion) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 129c374..548d699 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -241,10 +241,12 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
codegen->RecordPcInfo(at_, dex_pc_);
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
+
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
@@ -266,6 +268,49 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
};
+class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ codegen->RestoreLiveRegisters(locations);
+
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
+};
+
#undef __
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
@@ -2671,5 +2716,60 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
+void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ __ movl(out, Address(obj, class_offset));
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, Location::RegisterLocation(out));
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace x86
} // namespace art
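
Two x86 details are worth noting. First, SetInAt(1, Location::Any()) lets the class operand live in a register or a stack slot, so the compare can read straight from memory (cmpl against Address(ESP, ...)), whereas ARM requires a register. Second, like the ARM version, the slow path stages its two arguments through HParallelMove because source and destination locations can overlap: the loaded class may already occupy the register that must receive the other argument, and a sequential copy would clobber a source. An illustrative Java sketch of that cycle-breaking idea, not the ART resolver itself:

    // If arg0 must receive regs[b] while arg1 must receive regs[a],
    // copying in order clobbers a source; a temporary breaks the cycle.
    static void swapViaTemp(int[] regs, int a, int b) {
        int tmp = regs[a];
        regs[a] = regs[b];
        regs[b] = tmp;
    }
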
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index efe1ef2..b9891d6 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -224,10 +224,11 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
: QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
codegen->RecordPcInfo(at_, dex_pc_);
+ Location out = locations->Out();
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x64_codegen->Move(out, Location::RegisterLocation(RAX));
}
codegen->RestoreLiveRegisters(locations);
@@ -281,6 +282,50 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
};
+class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
+};
+
#undef __
#define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())->
@@ -2661,5 +2706,59 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
+void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86_64* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ // Compare the class of `obj` with `cls`.
+ __ movl(out, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, Location::RegisterLocation(out.AsRegister()));
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace x86_64
} // namespace art
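
The IsClassFinal() split, common to all three implemented backends, is what allows kNoCall: when the target class is final no subclass can exist, so instanceof degenerates to a null check plus one class-pointer comparison and no slow path is needed. A hedged Java statement of that equivalence (Box is a hypothetical final class, not from this patch):

    static final class Box {}  // hypothetical final class

    static boolean instanceOfFinalBox(Object obj) {
        // For a final class, instanceof reduces to a null check plus
        // one exact class comparison; no subtype can ever match.
        return obj != null && obj.getClass() == Box.class;
    }
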
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 47ed8df..ecf8c37 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -524,6 +524,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
+ M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
#define FOR_EACH_INSTRUCTION(M) \
@@ -2325,6 +2326,45 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
+class HTypeCheck : public HExpression<2> {
+ public:
+ explicit HTypeCheck(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // TODO: Can we debug when doing a runtime instanceof check?
+ return false;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(TypeCheck);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+};
+
+
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
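
HTypeCheck is modeled as a side-effect-free boolean HExpression: CanBeMoved() together with an InstructionDataEquals() that matches any other HTypeCheck with the same inputs makes duplicate checks candidates for value numbering. A hedged Java illustration of the pattern this enables, assuming a GVN-style pass runs over the graph:

    static boolean redundantChecks(Object o) {
        boolean first = o instanceof String;
        boolean second = o instanceof String;  // same inputs as `first`;
                                               // a GVN pass may reuse it
        return first && second;
    }
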
diff --git a/test/422-instanceof/expected.txt b/test/422-instanceof/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/422-instanceof/expected.txt
diff --git a/test/422-instanceof/info.txt b/test/422-instanceof/info.txt
new file mode 100644
index 0000000..b2f7ff1
--- /dev/null
+++ b/test/422-instanceof/info.txt
@@ -0,0 +1 @@
+Tests for instanceof bytecode.
diff --git a/test/422-instanceof/src/Main.java b/test/422-instanceof/src/Main.java
new file mode 100644
index 0000000..307c987
--- /dev/null
+++ b/test/422-instanceof/src/Main.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static void assertTrue(boolean value) {
+ if (!value) {
+ throw new Error("Wrong result");
+ }
+ }
+
+ public static void assertFalse(boolean value) {
+ if (value) {
+ throw new Error("Wrong result");
+ }
+ }
+
+ public static boolean $opt$InstanceOfMain() {
+ return a instanceof Main;
+ }
+
+ public static boolean $opt$InstanceOfFinalClass() {
+ return a instanceof FinalClass;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ assertTrue($opt$InstanceOfMain());
+ a = null;
+ assertFalse($opt$InstanceOfMain());
+ a = new MainChild();
+ assertTrue($opt$InstanceOfMain());
+ a = new Object();
+ assertFalse($opt$InstanceOfMain());
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ assertTrue($opt$InstanceOfFinalClass());
+ a = null;
+ assertFalse($opt$InstanceOfFinalClass());
+ a = new Main();
+ assertFalse($opt$InstanceOfFinalClass());
+ a = new Object();
+ assertFalse($opt$InstanceOfFinalClass());
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f74547b..ae8ff5e 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -440,6 +440,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
420-const-class \
421-exceptions \
421-large-frame \
+ 422-instanceof \
422-type-conversion \
700-LoadArgRegs \
701-easy-div-rem \