author     Roland Levillain <rpl@google.com>                          2015-06-29 08:18:30 +0000
committer  Android (Google) Code Review <android-gerrit@google.com>  2015-06-29 08:18:31 +0000
commit     d735c41e95a5d89cc9dad0c78d7d052579d8bd41 (patch)
tree       150a469926f1a0beac5194cb1b072a10e1d57ef3 /compiler
parent     a10c713ec3d5349cb907c184ebbd66e93b2dda99 (diff)
parent     a1935c4fa255b5c20f5e9b2abce6be2d0f7cb0a8 (diff)
Merge "MIPS: Initial version of optimizing compiler for MIPS64R6." into mnc-dev
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                              1
-rw-r--r--  compiler/optimizing/builder.cc                  15
-rw-r--r--  compiler/optimizing/builder.h                    5
-rw-r--r--  compiler/optimizing/code_generator.cc           18
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  3250
-rw-r--r--  compiler/optimizing/code_generator_mips64.h    302
-rw-r--r--  compiler/optimizing/codegen_test.cc             17
-rw-r--r--  compiler/optimizing/nodes.h                     13
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc       1
-rw-r--r--  compiler/optimizing/register_allocator.cc        1
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc      894
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h       150
-rw-r--r--  compiler/utils/mips64/constants_mips64.h         2
13 files changed, 4415 insertions, 254 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3f5271d..5770edf 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -103,6 +103,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/code_generator.cc \
optimizing/code_generator_arm.cc \
optimizing/code_generator_arm64.cc \
+ optimizing/code_generator_mips64.cc \
optimizing/code_generator_x86.cc \
optimizing/code_generator_x86_64.cc \
optimizing/code_generator_utils.cc \
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index b564aca..d175efe 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -483,10 +483,11 @@ void HGraphBuilder::Binop_23x_shift(const Instruction& instruction,
void HGraphBuilder::Binop_23x_cmp(const Instruction& instruction,
Primitive::Type type,
- HCompare::Bias bias) {
+ HCompare::Bias bias,
+ uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- current_block_->AddInstruction(new (arena_) HCompare(type, first, second, bias));
+ current_block_->AddInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -2072,27 +2073,27 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
case Instruction::CMP_LONG: {
- Binop_23x_cmp(instruction, Primitive::kPrimLong, HCompare::kNoBias);
+ Binop_23x_cmp(instruction, Primitive::kPrimLong, HCompare::kNoBias, dex_pc);
break;
}
case Instruction::CMPG_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kGtBias);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kGtBias, dex_pc);
break;
}
case Instruction::CMPG_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kGtBias);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kGtBias, dex_pc);
break;
}
case Instruction::CMPL_FLOAT: {
- Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kLtBias);
+ Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kLtBias, dex_pc);
break;
}
case Instruction::CMPL_DOUBLE: {
- Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kLtBias);
+ Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kLtBias, dex_pc);
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index b1ee824..052aaf8 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -117,7 +117,10 @@ class HGraphBuilder : public ValueObject {
template<typename T>
void Binop_23x_shift(const Instruction& instruction, Primitive::Type type);
- void Binop_23x_cmp(const Instruction& instruction, Primitive::Type type, HCompare::Bias bias);
+ void Binop_23x_cmp(const Instruction& instruction,
+ Primitive::Type type,
+ HCompare::Bias bias,
+ uint32_t dex_pc);
template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 508e770..ff04724 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -20,6 +20,7 @@
#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
+#include "code_generator_mips64.h"
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
@@ -459,6 +460,11 @@ CodeGenerator* CodeGenerator::Create(HGraph* graph,
}
case kMips:
return nullptr;
+ case kMips64: {
+ return new mips64::CodeGeneratorMIPS64(graph,
+ *isa_features.AsMips64InstructionSetFeatures(),
+ compiler_options);
+ }
case kX86: {
return new x86::CodeGeneratorX86(graph,
*isa_features.AsX86InstructionSetFeatures(),
@@ -629,18 +635,18 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
if (instruction != nullptr) {
- // The code generated for some type conversions may call the
- // runtime, thus normally requiring a subsequent call to this
- // method. However, the method verifier does not produce PC
- // information for certain instructions, which are considered "atomic"
- // (they cannot join a GC).
+ // The code generated for some type conversions and comparisons
+ // may call the runtime, thus normally requiring a subsequent
+ // call to this method. However, the method verifier does not
+ // produce PC information for certain instructions, which are
+ // considered "atomic" (they cannot join a GC).
// Therefore we do not currently record PC information for such
// instructions. As this may change later, we added this special
// case so that code generators may nevertheless call
// CodeGenerator::RecordPcInfo without triggering an error in
// CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
// thereafter.
- if (instruction->IsTypeConversion()) {
+ if (instruction->IsTypeConversion() || instruction->IsCompare()) {
return;
}
if (instruction->IsRem()) {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
new file mode 100644
index 0000000..69a90ad
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -0,0 +1,3250 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "art_method.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils/mips64/assembler_mips64.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace mips64 {
+
+static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr GpuRegister kMethodRegisterArgument = A0;
+
+// We need extra temporary/scratch registers (in addition to AT) in some cases.
+static constexpr GpuRegister TMP = T8;
+static constexpr FpuRegister FTMP = F8;
+
+// ART Thread Register.
+static constexpr GpuRegister TR = S1;
+
+Location Mips64ReturnLocation(Primitive::Type return_type) {
+ switch (return_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong:
+ return Location::RegisterLocation(V0);
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ return Location::FpuRegisterLocation(F0);
+
+ case Primitive::kPrimVoid:
+ return Location();
+ }
+ UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
+ return Mips64ReturnLocation(type);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ }
+
+ if (Primitive::IsFloatingPointType(type) &&
+ (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ next_location = Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(float_index_++));
+ gp_index_++;
+ } else if (!Primitive::IsFloatingPointType(type) &&
+ (gp_index_ < calling_convention.GetNumberOfRegisters())) {
+ next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
+ float_index_++;
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
+ }
+
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
+
+ // TODO: review
+
+ // TODO: shouldn't we use a whole machine word per argument on the stack?
+ // Implicit 4-byte method pointer (and such) will cause misalignment.
+
+ return next_location;
+}
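
A note on the visitor above: the GP and FPU argument indices advance in lockstep (a floating-point argument also bumps gp_index_, and an integer argument bumps float_index_), so every argument consumes one slot from both register pools. A minimal standalone sketch of that policy, with the pool size and output format as illustrative assumptions rather than the real MIPS64 managed-ABI tables:

#include <cstdio>

// Assumed pool size for illustration only; the real counts come from
// GetNumberOfRegisters()/GetNumberOfFpuRegisters() on the calling convention.
constexpr int kNumArgRegs = 7;

void AssignArgs(const bool* is_fp, int n) {
  int index = 0;  // GP and FPU indices move together, so one index suffices.
  for (int i = 0; i < n; ++i) {
    if (index < kNumArgRegs) {
      std::printf("arg %d -> %s register #%d\n", i, is_fp[i] ? "FPU" : "GP", index);
      ++index;  // consumes a slot in *both* pools, matching the code above
    } else {
      std::printf("arg %d -> stack\n", i);
    }
  }
}

int main() {
  const bool signature[] = {false, true, false};  // e.g. (int, float, long)
  AssignArgs(signature, 3);
  return 0;
}
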
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
+ return Mips64ReturnLocation(type);
+}
+
+#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
+
+class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
+ Location index_location,
+ Location length_location)
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ __ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+ }
+
+ private:
+ HBoundsCheck* const instruction_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
+};
+
+class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ __ Bind(GetEntryLabel());
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
+};
+
+class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ LoadClassSlowPathMIPS64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
+ if (do_clinit_) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ } else {
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ }
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
+};
+
+class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ Primitive::Type type = instruction_->GetType();
+ mips64_codegen->MoveLocation(locations->Out(),
+ calling_convention.GetReturnLocation(type),
+ type);
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
+};
+
+class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ __ Bind(GetEntryLabel());
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
+ }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
+};
+
+class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction,
+ HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(mips64_codegen->GetLabelOf(successor_));
+ }
+ }
+
+ Label* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
+};
+
+class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ TypeCheckSlowPathMIPS64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
+ : instruction_(instruction),
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
+
+ if (instruction_->IsInstanceOf()) {
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc_,
+ this);
+ Primitive::Type ret_type = instruction_->GetType();
+ Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+ mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial,
+ uint32_t,
+ const mirror::Class*,
+ const mirror::Class*>();
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ private:
+ HInstruction* const instruction_;
+ const Location class_to_check_;
+ const Location object_class_;
+ uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
+};
+
+class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
+ : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ DCHECK(instruction_->IsDeoptimize());
+ HDeoptimize* deoptimize = instruction_->AsDeoptimize();
+ uint32_t dex_pc = deoptimize->GetDexPc();
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ }
+
+ private:
+ HInstruction* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
+};
+
+CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
+ const Mips64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : CodeGenerator(graph,
+ kNumberOfGpuRegisters,
+ kNumberOfFpuRegisters,
+ 0, // kNumberOfRegisterPairs
+ ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+ arraysize(kCoreCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
+ compiler_options),
+ block_labels_(graph->GetArena(), 0),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ isa_features_(isa_features) {
+ // Save RA (containing the return address) to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(RA));
+}
+
+#undef __
+#define __ down_cast<Mips64Assembler*>(GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
+
+void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
+ CodeGenerator::Finalize(allocator);
+}
+
+Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
+ MoveOperands* move = moves_.Get(index);
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
+ MoveOperands* move = moves_.Get(index);
+ codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
+ // Pop reg
+ __ Ld(GpuRegister(reg), SP, 0);
+ __ DecreaseFrameSize(kMips64WordSize);
+}
+
+void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
+ // Push reg
+ __ IncreaseFrameSize(kMips64WordSize);
+ __ Sd(GpuRegister(reg), SP, 0);
+}
+
+void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
+ LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
+ StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
+ // Allocate a scratch register other than TMP, if available.
+ // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
+ // automatically unspilled when the scratch scope object is destroyed).
+ ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
+ // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
+ int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
+ __ LoadFromOffset(load_type,
+ GpuRegister(ensure_scratch.GetRegister()),
+ SP,
+ index1 + stack_offset);
+ __ LoadFromOffset(load_type,
+ TMP,
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(store_type,
+ GpuRegister(ensure_scratch.GetRegister()),
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
+}
+
+static dwarf::Reg DWARFReg(GpuRegister reg) {
+ return dwarf::Reg::Mips64Core(static_cast<int>(reg));
+}
+
+// TODO: mapping of floating-point registers to DWARF
+
+void CodeGeneratorMIPS64::GenerateFrameEntry() {
+ __ Bind(&frame_entry_label_);
+
+ bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
+
+ if (do_overflow_check) {
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
+ RecordPcInfo(nullptr, 0);
+ }
+
+ // TODO: anything related to T9/GP/GOT/PIC/.so's?
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ // Make sure the frame size isn't unreasonably large. Per the various APIs
+ // it looks like it should always be less than 2GB in size, which allows
+ // us to use 32-bit signed offsets from the stack pointer.
+ if (GetFrameSize() > 0x7FFFFFFF)
+ LOG(FATAL) << "Stack frame larger than 2GB";
+
+ // Spill callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = FrameEntrySpillSize();
+ __ IncreaseFrameSize(ofs);
+
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+ GpuRegister reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ ofs -= kMips64WordSize;
+ __ Sd(reg, SP, ofs);
+ __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FpuRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMips64WordSize;
+ __ Sdc1(reg, SP, ofs);
+ // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ // Allocate the rest of the frame and store the current method pointer
+ // at its end.
+
+ __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+}
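
To make the entry sequence concrete: the frame is built with two SP adjustments, callee-saves are stored at the very top of the frame by walking the save lists backwards, and the ArtMethod* ends up at SP + 0. A standalone model of how the spill offsets come out (the allocated register set here is an illustrative assumption):

#include <cstdio>

constexpr unsigned kWordSize = 8;  // kMips64WordSize

int main() {
  // Assumed allocated callee-save set, lowest to highest as in the real list.
  const char* saves[] = {"S2", "S3", "S8", "RA"};
  const int n = sizeof(saves) / sizeof(saves[0]);
  unsigned ofs = n * kWordSize;  // FrameEntrySpillSize()
  // Mirrors the loop above: iterate backwards and pre-decrement the offset,
  // so the last register in the list (RA) lands at the highest offset.
  for (int i = n - 1; i >= 0; --i) {
    ofs -= kWordSize;
    std::printf("sd %s, %u(sp)\n", saves[i], ofs);
  }
  return 0;
}
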
+
+void CodeGeneratorMIPS64::GenerateFrameExit() {
+ __ cfi().RememberState();
+
+ // TODO: anything related to T9/GP/GOT/PIC/.so's?
+
+ if (!HasEmptyFrame()) {
+ // Deallocate the rest of the frame.
+
+ __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ // Restore callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+ uint32_t ofs = 0;
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ FpuRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ __ Ldc1(reg, SP, ofs);
+ ofs += kMips64WordSize;
+ // TODO: __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ GpuRegister reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ Ld(reg, SP, ofs);
+ ofs += kMips64WordSize;
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ DCHECK_EQ(ofs, FrameEntrySpillSize());
+ __ DecreaseFrameSize(ofs);
+ }
+
+ __ Jr(RA);
+
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorMIPS64::MoveLocation(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ // A valid move can always be inferred from the destination and source
+ // locations. When moving from and to a register, the argument type can be
+ // used to generate 32bit instead of 64bit moves.
+ bool unspecified_type = (type == Primitive::kPrimVoid);
+ DCHECK_EQ(unspecified_type, false);
+
+ if (destination.IsRegister() || destination.IsFpuRegister()) {
+ if (unspecified_type) {
+ HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
+ if (source.IsStackSlot() ||
+ (src_cst != nullptr && (src_cst->IsIntConstant()
+ || src_cst->IsFloatConstant()
+ || src_cst->IsNullConstant()))) {
+ // For stack slots and 32bit constants, a 32bit type is appropriate.
+ type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ } else {
+ // If the source is a double stack slot or a 64bit constant, a 64bit
+ // type is appropriate. Else the source is a register, and since the
+ // type has not been specified, we choose a 64bit type to force a 64bit
+ // move.
+ type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ }
+ }
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+ if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ // Move to GPR/FPR from stack
+ LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
+ if (Primitive::IsFloatingPointType(type)) {
+ __ LoadFpuFromOffset(load_type,
+ destination.AsFpuRegister<FpuRegister>(),
+ SP,
+ source.GetStackIndex());
+ } else {
+ // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
+ __ LoadFromOffset(load_type,
+ destination.AsRegister<GpuRegister>(),
+ SP,
+ source.GetStackIndex());
+ }
+ } else if (source.IsConstant()) {
+ // Move to GPR/FPR from constant
+ GpuRegister gpr = AT;
+ if (!Primitive::IsFloatingPointType(type)) {
+ gpr = destination.AsRegister<GpuRegister>();
+ }
+ if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+ __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
+ } else {
+ __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
+ }
+ if (type == Primitive::kPrimFloat) {
+ __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
+ } else if (type == Primitive::kPrimDouble) {
+ __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
+ }
+ } else {
+ if (destination.IsRegister()) {
+ // Move to GPR from GPR
+ __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
+ } else {
+ // Move to FPR from FPR
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ }
+ }
+ }
+ } else { // The destination is not a register. It must be a stack slot.
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister() || source.IsFpuRegister()) {
+ if (unspecified_type) {
+ if (source.IsRegister()) {
+ type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ } else {
+ type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ }
+ }
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+ // Move to stack from GPR/FPR
+ StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
+ if (source.IsRegister()) {
+ __ StoreToOffset(store_type,
+ source.AsRegister<GpuRegister>(),
+ SP,
+ destination.GetStackIndex());
+ } else {
+ __ StoreFpuToOffset(store_type,
+ source.AsFpuRegister<FpuRegister>(),
+ SP,
+ destination.GetStackIndex());
+ }
+ } else if (source.IsConstant()) {
+ // Move to stack from constant
+ HConstant* src_cst = source.GetConstant();
+ StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
+ if (destination.IsStackSlot()) {
+ __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
+ } else {
+ __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
+ }
+ __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
+ } else {
+ DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
+ DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
+ // Move to stack from stack
+ if (destination.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
+ } else {
+ __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
+ }
+ }
+ }
+}
+
+void CodeGeneratorMIPS64::SwapLocations(Location loc1,
+ Location loc2,
+ Primitive::Type type ATTRIBUTE_UNUSED) {
+ DCHECK(!loc1.IsConstant());
+ DCHECK(!loc2.IsConstant());
+
+ if (loc1.Equals(loc2)) {
+ return;
+ }
+
+ bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
+ bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
+ bool is_fp_reg1 = loc1.IsFpuRegister();
+ bool is_fp_reg2 = loc2.IsFpuRegister();
+
+ if (loc2.IsRegister() && loc1.IsRegister()) {
+ // Swap 2 GPRs
+ GpuRegister r1 = loc1.AsRegister<GpuRegister>();
+ GpuRegister r2 = loc2.AsRegister<GpuRegister>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if (is_fp_reg2 && is_fp_reg1) {
+ // Swap 2 FPRs
+ FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
+ FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
+ // TODO: Can MOV.S/MOV.D be used here to save one instruction?
+ // Need to distinguish float from double, right?
+ __ Dmfc1(TMP, r2);
+ __ Dmfc1(AT, r1);
+ __ Dmtc1(TMP, r1);
+ __ Dmtc1(AT, r2);
+ } else if (is_slot1 != is_slot2) {
+ // Swap GPR/FPR and stack slot
+ Location reg_loc = is_slot1 ? loc2 : loc1;
+ Location mem_loc = is_slot1 ? loc1 : loc2;
+ LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
+ StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
+ // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
+ __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
+ if (reg_loc.IsFpuRegister()) {
+ __ StoreFpuToOffset(store_type,
+ reg_loc.AsFpuRegister<FpuRegister>(),
+ SP,
+ mem_loc.GetStackIndex());
+ // TODO: review this MTC1/DMTC1 move
+ if (mem_loc.IsStackSlot()) {
+ __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
+ } else {
+ DCHECK(mem_loc.IsDoubleStackSlot());
+ __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
+ }
+ } else {
+ __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
+ __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
+ }
+ } else if (is_slot1 && is_slot2) {
+ move_resolver_.Exchange(loc1.GetStackIndex(),
+ loc2.GetStackIndex(),
+ loc1.IsDoubleStackSlot());
+ } else {
+ LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
+ }
+}
+
+void CodeGeneratorMIPS64::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
+ if (location.IsRegister()) {
+ // Move to GPR from constant
+ GpuRegister dst = location.AsRegister<GpuRegister>();
+ if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
+ __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
+ } else {
+ __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
+ }
+ } else {
+ DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
+ // Move to stack from constant
+ if (location.IsStackSlot()) {
+ __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
+ __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
+ } else {
+ __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
+ __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
+ }
+ }
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ MoveLocation(location, temp_location, type);
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ if (Primitive::Is64BitType(type)) {
+ MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
+ } else {
+ MoveLocation(location, Location::StackSlot(stack_slot), type);
+ }
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ MoveLocation(location, locations->Out(), type);
+ }
+}
+
+Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
+ Label done;
+ GpuRegister card = AT;
+ GpuRegister temp = TMP;
+ __ Beqzc(value, &done);
+ __ LoadFromOffset(kLoadDoubleword,
+ card,
+ TR,
+ Thread::CardTableOffset<kMips64WordSize>().Int32Value());
+ __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Daddu(temp, card, temp);
+ __ Sb(card, temp, 0);
+ __ Bind(&done);
+}
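
The card-marking sequence above uses a well-known ART trick: the byte stored into the card is the card-table base register itself, whose low byte the runtime biases to equal the dirty-card value, so no separate constant load is needed. A standalone model of the address arithmetic (the shift constant is an assumption standing in for gc::accounting::CardTable::kCardShift):

#include <cstdint>

constexpr unsigned kCardShift = 7;  // assumed; real value comes from CardTable

void MarkCard(uint8_t* card_table_base, uintptr_t object_addr) {
  // Dsrl + Daddu above: card entry = biased base + (object >> kCardShift).
  uint8_t* card = card_table_base + (object_addr >> kCardShift);
  // Sb above: store the base's low byte, which the runtime has arranged to
  // be the dirty value.
  *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_table_base));
}

int main() {
  static uint8_t table[1 << 16] = {};
  MarkCard(table, 0x1234);
  return table[0x1234 >> kCardShift] ==
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(table)) ? 0 : 1;
}
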
+
+void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
+ // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
+ blocked_core_registers_[ZERO] = true;
+ blocked_core_registers_[K0] = true;
+ blocked_core_registers_[K1] = true;
+ blocked_core_registers_[GP] = true;
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[RA] = true;
+
+ // AT and TMP (T8) are used as temporary/scratch registers
+ // (similar to how AT is used by MIPS assemblers).
+ blocked_core_registers_[AT] = true;
+ blocked_core_registers_[TMP] = true;
+ blocked_fpu_registers_[FTMP] = true;
+
+ // Reserve suspend and thread registers.
+ blocked_core_registers_[S0] = true;
+ blocked_core_registers_[TR] = true;
+
+ // Reserve T9 for function calls
+ blocked_core_registers_[T9] = true;
+
+ // TODO: review; anything else?
+
+ // TODO: make these two loops conditional on is_baseline once
+ // all the issues with register saving/restoring are sorted out.
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ blocked_core_registers_[kCoreCalleeSaves[i]] = true;
+ }
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+ }
+}
+
+Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (Primitive::IsFloatingPointType(type)) {
+ size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
+ return Location::FpuRegisterLocation(reg);
+ } else {
+ size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
+ return Location::RegisterLocation(reg);
+ }
+}
+
+size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
+ return kMips64WordSize;
+}
+
+size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
+ return kMips64WordSize;
+}
+
+size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
+ return kMips64WordSize;
+}
+
+size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
+ return kMips64WordSize;
+}
+
+void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
+}
+
+void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
+}
+
+void CodeGeneratorMIPS64::LoadCurrentMethod(GpuRegister current_method) {
+ DCHECK(RequiresCurrentMethod());
+ __ Ld(current_method, SP, kCurrentMethodStackOffset);
+}
+
+void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ // TODO: anything related to T9/GP/GOT/PIC/.so's?
+ __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
+ __ Jalr(T9);
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
+}
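
For reference, the call sequence above is a plain indirect call through the thread: the quick entry point is a function pointer at a fixed offset inside the Thread object, loaded into T9 and called via JALR. A standalone model with an assumed, purely illustrative layout (real offsets come from QUICK_ENTRYPOINT_OFFSET):

#include <cstddef>
#include <cstdint>
#include <cstdio>

using Entrypoint = void (*)();

void DemoEntrypoint() { std::puts("entrypoint called"); }

struct FakeThread {
  uint64_t other_state[4];    // stand-in for the rest of Thread
  Entrypoint entrypoints[8];  // stand-in for the quick entrypoint table
};

void InvokeRuntime(FakeThread* self, int32_t entry_point_offset) {
  // LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset); Jalr(T9);
  Entrypoint fn = *reinterpret_cast<Entrypoint*>(
      reinterpret_cast<char*>(self) + entry_point_offset);
  fn();
}

int main() {
  FakeThread t = {};
  t.entrypoints[2] = DemoEntrypoint;
  InvokeRuntime(&t, offsetof(FakeThread, entrypoints) + 2 * sizeof(Entrypoint));
  return 0;
}
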
+
+void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
+ GpuRegister class_reg) {
+ __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+ __ LoadConst32(AT, mirror::Class::kStatusInitialized);
+ __ Bltc(TMP, AT, slow_path->GetEntryLabel());
+ // TODO: barrier needed?
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
+ __ Sync(0); // only stype 0 is supported
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ SuspendCheckSlowPathMIPS64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ codegen_->AddSlowPath(slow_path);
+
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ TMP,
+ TR,
+ Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
+ if (successor == nullptr) {
+ __ Bnezc(TMP, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+ } else {
+ __ Beqzc(TMP, codegen_->GetLabelOf(successor));
+ __ B(slow_path->GetEntryLabel());
+ // slow_path will return to GetLabelOf(successor).
+ }
+}
+
+InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
+ CodeGeneratorMIPS64* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
+ DCHECK_EQ(instruction->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type type = instruction->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ HInstruction* right = instruction->InputAt(1);
+ bool can_use_imm = false;
+ if (right->IsConstant()) {
+ int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
+ if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+ can_use_imm = IsUint<16>(imm);
+ } else if (instruction->IsAdd()) {
+ can_use_imm = IsInt<16>(imm);
+ } else {
+ DCHECK(instruction->IsSub());
+ can_use_imm = IsInt<16>(-imm);
+ }
+ }
+ if (can_use_imm)
+ locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+ else
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
+ }
+}
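
The can_use_imm test above encodes the MIPS immediate forms: ANDI/ORI/XORI zero-extend a 16-bit immediate, ADDIU/DADDIU sign-extend one, and subtraction is emitted as an add of the negated immediate, which is why the check is IsInt<16>(-imm). A standalone sketch of those range checks:

#include <cassert>
#include <cstdint>

bool IsUint16(int64_t v) { return v >= 0 && v <= 0xFFFF; }       // ANDI/ORI/XORI
bool IsInt16(int64_t v) { return v >= -0x8000 && v <= 0x7FFF; }  // ADDIU/DADDIU

int main() {
  assert(IsUint16(0xFFFF));      // fine as a logical immediate
  assert(!IsInt16(0xFFFF));      // but out of range for an add immediate
  assert(IsInt16(-0x8000));      // adding -32768 is encodable...
  assert(!IsInt16(-(-0x8000)));  // ...but subtracting -32768 is not: -imm = 32768
  return 0;
}
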
+
+void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = locations->InAt(1);
+
+ GpuRegister rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+
+ if (instruction->IsAnd()) {
+ if (use_imm)
+ __ Andi(dst, lhs, rhs_imm);
+ else
+ __ And(dst, lhs, rhs_reg);
+ } else if (instruction->IsOr()) {
+ if (use_imm)
+ __ Ori(dst, lhs, rhs_imm);
+ else
+ __ Or(dst, lhs, rhs_reg);
+ } else if (instruction->IsXor()) {
+ if (use_imm)
+ __ Xori(dst, lhs, rhs_imm);
+ else
+ __ Xor(dst, lhs, rhs_reg);
+ } else if (instruction->IsAdd()) {
+ if (type == Primitive::kPrimInt) {
+ if (use_imm)
+ __ Addiu(dst, lhs, rhs_imm);
+ else
+ __ Addu(dst, lhs, rhs_reg);
+ } else {
+ if (use_imm)
+ __ Daddiu(dst, lhs, rhs_imm);
+ else
+ __ Daddu(dst, lhs, rhs_reg);
+ }
+ } else {
+ DCHECK(instruction->IsSub());
+ if (type == Primitive::kPrimInt) {
+ if (use_imm)
+ __ Addiu(dst, lhs, -rhs_imm);
+ else
+ __ Subu(dst, lhs, rhs_reg);
+ } else {
+ if (use_imm)
+ __ Daddiu(dst, lhs, -rhs_imm);
+ else
+ __ Dsubu(dst, lhs, rhs_reg);
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ if (instruction->IsAdd()) {
+ if (type == Primitive::kPrimFloat)
+ __ AddS(dst, lhs, rhs);
+ else
+ __ AddD(dst, lhs, rhs);
+ } else if (instruction->IsSub()) {
+ if (type == Primitive::kPrimFloat)
+ __ SubS(dst, lhs, rhs);
+ else
+ __ SubD(dst, lhs, rhs);
+ } else {
+ LOG(FATAL) << "Unexpected floating-point binary operation";
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected binary operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ LocationSummary* locations = instr->GetLocations();
+ Primitive::Type type = instr->GetType();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = locations->InAt(1);
+
+ GpuRegister rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+
+ if (use_imm) {
+ uint32_t shift_value = (type == Primitive::kPrimInt)
+ ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
+ : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
+
+ if (type == Primitive::kPrimInt) {
+ if (instr->IsShl()) {
+ __ Sll(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Sra(dst, lhs, shift_value);
+ } else {
+ __ Srl(dst, lhs, shift_value);
+ }
+ } else {
+ if (shift_value < 32) {
+ if (instr->IsShl()) {
+ __ Dsll(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Dsra(dst, lhs, shift_value);
+ } else {
+ __ Dsrl(dst, lhs, shift_value);
+ }
+ } else {
+ shift_value -= 32;
+ if (instr->IsShl()) {
+ __ Dsll32(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Dsra32(dst, lhs, shift_value);
+ } else {
+ __ Dsrl32(dst, lhs, shift_value);
+ }
+ }
+ }
+ } else {
+ if (type == Primitive::kPrimInt) {
+ if (instr->IsShl()) {
+ __ Sllv(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Srav(dst, lhs, rhs_reg);
+ } else {
+ __ Srlv(dst, lhs, rhs_reg);
+ }
+ } else {
+ if (instr->IsShl()) {
+ __ Dsllv(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Dsrav(dst, lhs, rhs_reg);
+ } else {
+ __ Dsrlv(dst, lhs, rhs_reg);
+ }
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift operation type " << type;
+ }
+}
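
The immediate-shift split above exists because MIPS shift encodings hold only a 5-bit amount: amounts of 32..63 must use the "+32" variants (DSLL32/DSRA32/DSRL32) with the amount reduced by 32. A standalone model of the selection for DSLL:

#include <cassert>
#include <cstdint>
#include <string>

std::string EmitDsll(uint32_t shift_value) {
  shift_value &= 63;  // kMaxLongShiftValue masking, as in the code above
  return shift_value < 32 ? "dsll #" + std::to_string(shift_value)
                          : "dsll32 #" + std::to_string(shift_value - 32);
}

int main() {
  assert(EmitDsll(8) == "dsll #8");
  assert(EmitDsll(40) == "dsll32 #8");  // 40 = 32 + 8
  assert(EmitDsll(64) == "dsll #0");    // wraps via the 6-bit mask
  return 0;
}
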
+
+void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location index = locations->InAt(1);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ } else {
+ __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ } else {
+ __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(load_type, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(load_type, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
+ __ Daddu(TMP, obj, TMP);
+ __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
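
Every element access above follows one addressing pattern: a constant index folds the scaled offset into the load's immediate, while a register index is scaled by the element size with a shift and added to the base first. A standalone model of the arithmetic (the data offset is a stand-in assumption for mirror::Array::DataOffset(...)):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kDataOffset = 16;  // assumed; the real header size varies

// Register-index path above: Dsll(TMP, index, scale); Daddu(TMP, obj, TMP);
// then load at kDataOffset(TMP). The constant-index path folds everything
// into a single immediate offset.
uintptr_t ElementAddress(uintptr_t obj, uint32_t index, unsigned scale) {
  return obj + (static_cast<uintptr_t>(index) << scale) + kDataOffset;
}

int main() {
  // int[] uses TIMES_4 (scale 2): element 3 sits at obj + (3 << 2) + offset.
  assert(ElementAddress(0x1000, 3, 2) == 0x1000 + 12 + 16);
  return 0;
}
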
+
+void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ StoreToOffset(kStoreByte, value, obj, offset);
+ } else {
+ __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
+ __ Daddu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
+ __ Daddu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
+ } else {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
+ __ Daddu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      DCHECK(locations->InAt(2).IsFpuRegister());
+      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreFpuToOffset(kStoreWord, value, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
+ __ Daddu(TMP, obj, TMP);
+ __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      DCHECK(locations->InAt(2).IsFpuRegister());
+      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
+ } else {
+ __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
+ __ Daddu(TMP, obj, TMP);
+ __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+
+  // Int and object stores already recorded the implicit null check in the
+  // switch above (an object store may call into the runtime instead).
+ if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
+ instruction,
+ locations->InAt(0),
+ locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
+ GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
+
+  // The length is limited by the maximum positive signed 32-bit integer, so
+  // an unsigned comparison of index and length checks for index < 0 and for
+  // length <= index simultaneously.
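+  // For example, with length == 8: an index of -1 lives sign-extended in its
+  // 64-bit register and compares as a huge unsigned value, while an index of
+  // 8 compares as >= length, so both cases reach the slow path.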
+  // MIPS R6 requires lhs != rhs for compact branches.
+ if (index == length) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
+
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
+ instruction,
+ locations->InAt(1),
+ Location::RegisterLocation(obj_cls),
+ instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Beqzc(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
+}
+
+void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
+
+ LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+
+ switch (in_type) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ // 0 if: left == right
+ // 1 if: left > right
+ // -1 if: left < right
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
+ // TODO: more efficient (direct) comparison with a constant
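+      // Each Slt materializes a 0/1 flag, so (lhs > rhs) - (lhs < rhs)
+      // produces the required -1, 0 or 1.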
+ __ Slt(TMP, lhs, rhs);
+ __ Slt(dst, rhs, lhs);
+ __ Subu(dst, dst, TMP);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int32_t entry_point_offset;
+ if (in_type == Primitive::kPrimFloat) {
+ entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
+ : QUICK_ENTRY_POINT(pCmplFloat);
+ } else {
+ entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
+ : QUICK_ENTRY_POINT(pCmplDouble);
+ }
+ codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = instruction->GetLocations();
+
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = locations->InAt(1);
+
+ GpuRegister rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+
+ IfCondition if_cond = instruction->GetCondition();
+
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
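+      // Materialize dst = (lhs == rhs) or dst = (lhs != rhs): XOR the
+      // operands, then test the result against zero (Sltiu dst, dst, 1
+      // computes dst == 0; Sltu dst, ZERO, dst computes dst != 0).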
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
+ break;
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondGE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the slt instruction but no sge.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondGT) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the slti instruction but no sgti.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ if (if_cond == kCondLE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the slt instruction but no sle.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt)
+ __ DivR6(dst, lhs, rhs);
+ else
+ __ Ddiv(dst, lhs, rhs);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ if (type == Primitive::kPrimFloat)
+ __ DivS(dst, lhs, rhs);
+ else
+ __ DivD(dst, lhs, rhs);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+
+ Primitive::Type type = instruction->GetType();
+
+ if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
+ LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+ }
+
+ if (value.IsConstant()) {
+ int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
+ if (divisor == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+      // A division by a non-zero constant is valid. We don't need to perform
+      // any check, so simply fall through.
+ }
+ } else {
+ __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
+ HBasicBlock* successor = got->GetSuccessor();
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target) {
+ HInstruction* cond = instruction->InputAt(0);
+ HCondition* condition = cond->AsCondition();
+
+ if (cond->IsIntConstant()) {
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (always_true_target != nullptr) {
+ __ B(always_true_target);
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
+ } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = instruction->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = condition->GetLocations()->InAt(1);
+ GpuRegister rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+
+ IfCondition if_cond = condition->GetCondition();
+ if (use_imm && rhs_imm == 0) {
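+      // A zero right-hand side maps onto the single-register R6 compact
+      // branches (Beqzc, Bnezc, ...), avoiding the TMP load below.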
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beqzc(lhs, true_target);
+ break;
+ case kCondNE:
+ __ Bnezc(lhs, true_target);
+ break;
+ case kCondLT:
+ __ Bltzc(lhs, true_target);
+ break;
+ case kCondGE:
+ __ Bgezc(lhs, true_target);
+ break;
+ case kCondLE:
+ __ Blezc(lhs, true_target);
+ break;
+ case kCondGT:
+ __ Bgtzc(lhs, true_target);
+ break;
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ // It looks like we can get here with lhs == rhs. Should that be possible at all?
+      // MIPS R6 requires lhs != rhs for compact branches.
+ if (lhs == rhs_reg) {
+ DCHECK(!use_imm);
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondGE:
+ case kCondLE:
+            // With lhs == rhs, EQ/GE/LE always hold, so emit an
+            // unconditional branch.
+ __ B(true_target);
+ break;
+ case kCondNE:
+ case kCondLT:
+ case kCondGT:
+            // With lhs == rhs, NE/LT/GT never hold, so no branch is needed.
+ break;
+ }
+ } else {
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beqc(lhs, rhs_reg, true_target);
+ break;
+ case kCondNE:
+ __ Bnec(lhs, rhs_reg, true_target);
+ break;
+ case kCondLT:
+ __ Bltc(lhs, rhs_reg, true_target);
+ break;
+ case kCondGE:
+ __ Bgec(lhs, rhs_reg, true_target);
+ break;
+ case kCondLE:
+ __ Bgec(rhs_reg, lhs, true_target);
+ break;
+ case kCondGT:
+ __ Bltc(rhs_reg, lhs, true_target);
+ break;
+ }
+ }
+ }
+ }
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
+ Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ Label* always_true_target = true_target;
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ always_true_target = nullptr;
+ }
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
+ false_target = nullptr;
+ }
+ GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
+}
+
+void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ HInstruction* cond = deoptimize->InputAt(0);
+ DCHECK(cond->IsCondition());
+ if (cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
+ DeoptimizationSlowPathMIPS64(deoptimize);
+ codegen_->AddSlowPath(slow_path);
+ Label* slow_path_entry = slow_path->GetEntryLabel();
+ GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
+}
+
+void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info ATTRIBUTE_UNUSED) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ LoadOperandType load_type = kLoadUnsignedByte;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ load_type = kLoadUnsignedByte;
+ break;
+ case Primitive::kPrimByte:
+ load_type = kLoadSignedByte;
+ break;
+ case Primitive::kPrimShort:
+ load_type = kLoadSignedHalfword;
+ break;
+ case Primitive::kPrimChar:
+ load_type = kLoadUnsignedHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ load_type = kLoadWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ load_type = kLoadDoubleword;
+ break;
+ case Primitive::kPrimNot:
+ load_type = kLoadUnsignedWord;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+ if (!Primitive::IsFloatingPointType(type)) {
+ DCHECK(locations->Out().IsRegister());
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->Out().IsFpuRegister());
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // TODO: memory barrier?
+}
+
+void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info ATTRIBUTE_UNUSED) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ StoreOperandType store_type = kStoreByte;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ store_type = kStoreByte;
+ break;
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ store_type = kStoreHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ store_type = kStoreWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ store_type = kStoreDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+ if (!Primitive::IsFloatingPointType(type)) {
+ DCHECK(locations->InAt(1).IsRegister());
+ GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
+ __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->InAt(1).IsFpuRegister());
+ FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // TODO: memory barriers?
+ if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ DCHECK(locations->InAt(1).IsRegister());
+ GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
+ codegen_->MarkGCCard(obj, src);
+ }
+}
+
+void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+  // The output overlaps the inputs.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ Label done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Move(out, ZERO);
+ __ Beqzc(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
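+    // The XOR result is zero iff the classes match; Sltiu then converts that
+    // zero into 1 and any non-zero value into 0.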
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
+ locations->InAt(1),
+ locations->Out(),
+ instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+ __ Bnec(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
+
+void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
+
+ InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
+
+ Primitive::Type return_type = invoke->GetType();
+ if (return_type != Primitive::kPrimVoid) {
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
+ }
+}
+
+void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+  // The register T0 is required to be used for the hidden argument in
+  // art_quick_imt_conflict_trampoline, so add it as an extra temporary.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
+ Location receiver = invoke->GetLocations()->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
+
+ // Set the hidden argument.
+ __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
+ invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+  // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke,
+ CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
+ if (invoke->GetLocations()->Intrinsified()) {
+    // TODO: intrinsic function.
+ return true;
+ }
+ return false;
+}
+
+void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ GpuRegister temp) {
+ // All registers are assumed to be correctly set up per the calling convention.
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadDoubleword,
+ temp,
+ TR,
+ invoke->GetStringInitOffset());
+ // T9 = temp->entry_point_from_quick_compiled_code_;
+ __ LoadFromOffset(kLoadDoubleword,
+ T9,
+ temp,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kMips64WordSize).Int32Value());
+ // T9()
+ __ Jalr(T9);
+ } else {
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(kLoadUnsignedWord,
+ temp,
+ temp,
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ // temp = temp[index_in_cache]
+ __ LoadFromOffset(kLoadDoubleword,
+ temp,
+ temp,
+ CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()));
+ // T9 = temp[offset_of_quick_compiled_code]
+ __ LoadFromOffset(kLoadDoubleword,
+ T9,
+ temp,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kMips64WordSize).Int32Value());
+ // T9()
+ __ Jalr(T9);
+ } else {
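+      // Direct recursive call: branch to this method's own frame entry label
+      // instead of reloading the entry point from the ArtMethod.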
+ __ Jalr(&frame_entry_label_, T9);
+ }
+ }
+
+ DCHECK(!IsLeafMethod());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+
+ GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
+
+ codegen_->GenerateStaticOrDirectCall(invoke, temp);
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: Try to generate intrinsics code.
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetMethodAt(method_offset);
+ __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
+ GpuRegister out = cls->GetLocations()->Out().AsRegister<GpuRegister>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadUnsignedWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadUnsignedWord, out, out, ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+    __ LoadFromOffset(
+        kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ cls,
+ cls,
+ cls->GetDexPc(),
+ cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
+ GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
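+  // Load the pending exception from the Thread object, then clear the field
+  // so the same exception is not delivered twice.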
+  __ LoadFromOffset(kLoadUnsignedWord, out, TR,
+                    Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
+  __ StoreToOffset(kStoreWord, ZERO, TR,
+                   Thread::ExceptionOffset<kMips64WordSize>().Int32Value());
+}
+
+void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadUnsignedWord, out, out, ArtMethod::DeclaringClassOffset().Int32Value());
+  __ LoadFromOffset(kLoadUnsignedWord, out, out,
+                    mirror::Class::DexCacheStringsOffset().Int32Value());
+  __ LoadFromOffset(kLoadUnsignedWord, out, out,
+                    CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ Beqzc(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject)
+ : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt)
+ __ MulR6(dst, lhs, rhs);
+ else
+ __ Dmul(dst, lhs, rhs);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ if (type == Primitive::kPrimFloat)
+ __ MulS(dst, lhs, rhs);
+ else
+ __ MulD(dst, lhs, rhs);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected mul type " << type;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt)
+ __ Subu(dst, ZERO, src);
+ else
+ __ Dsubu(dst, ZERO, src);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ if (type == Primitive::kPrimFloat)
+ __ NegS(dst, src);
+ else
+ __ NegD(dst, src);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected neg type " << type;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
+  // Move a uint16_t value into a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+  // Move a uint16_t value into a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
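+      // nor(src, ZERO) computes ~(src | 0), i.e. the bitwise not of src.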
+ __ Nor(dst, src, ZERO);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
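+  // Booleans are materialized as 0 or 1, so XOR-ing the low bit negates them.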
+ __ Xori(locations->Out().AsRegister<GpuRegister>(),
+ locations->InAt(0).AsRegister<GpuRegister>(),
+ 1);
+}
+
+void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
+ Location obj = instruction->GetLocations()->InAt(0);
+
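+  // The load into ZERO discards the result; the access exists only so that a
+  // null `obj` faults, which the runtime translates into a
+  // NullPointerException at the PC recorded below.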
+ __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
+ if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
+ GenerateImplicitNullCheck(instruction);
+ } else {
+ GenerateExplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ LocationSummary::CallKind call_kind =
+ Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt)
+ __ ModR6(dst, lhs, rhs);
+ else
+ __ Dmod(dst, lhs, rhs);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
+ : QUICK_ENTRY_POINT(pFmod);
+ codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ Primitive::Type return_type = ret->InputAt(0)->GetType();
+ locations->SetInAt(0, Mips64ReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ if (block->GetLoopInformation() != nullptr) {
+ DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+ // The back edge will generate the suspend check.
+ return;
+ }
+ if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+ // The goto will generate the suspend check.
+ return;
+ }
+ GenerateSuspendCheck(instruction, nullptr);
+}
+
+void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr);
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ DCHECK_NE(input_type, result_type);
+
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+ (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+ call_kind = LocationSummary::kCall;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ if (call_kind == LocationSummary::kNoCall) {
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (Primitive::IsFloatingPointType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ } else {
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ }
+
+ locations->SetOut(calling_convention.GetReturnLocation(result_type));
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
+
+ switch (result_type) {
+ case Primitive::kPrimChar:
+ __ Andi(dst, src, 0xFFFF);
+ break;
+ case Primitive::kPrimByte:
+ // long is never converted into types narrower than int directly,
+ // so SEB and SEH can be used without ever causing unpredictable results
+ // on 64-bit inputs
+ DCHECK(input_type != Primitive::kPrimLong);
+ __ Seb(dst, src);
+ break;
+ case Primitive::kPrimShort:
+ // long is never converted into types narrower than int directly,
+ // so SEB and SEH can be used without ever causing unpredictable results
+ // on 64-bit inputs
+ DCHECK(input_type != Primitive::kPrimLong);
+ __ Seh(dst, src);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ // Sign-extend 32-bit int into bits 32 through 63 for
+ // int-to-long and long-to-int conversions
+ __ Sll(dst, src, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
+ if (input_type != Primitive::kPrimLong) {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
+ __ Mtc1(src, FTMP);
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsw(dst, FTMP);
+ } else {
+ __ Cvtdw(dst, FTMP);
+ }
+ } else {
+ int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+ : QUICK_ENTRY_POINT(pL2d);
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
+ }
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
+ CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
+ int32_t entry_offset;
+ if (result_type != Primitive::kPrimLong) {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
+ : QUICK_ENTRY_POINT(pD2iz);
+ } else {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+ : QUICK_ENTRY_POINT(pD2l);
+ }
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr);
+ } else if (Primitive::IsFloatingPointType(result_type) &&
+ Primitive::IsFloatingPointType(input_type)) {
+ FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
+ FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsd(dst, src);
+ } else {
+ __ Cvtds(dst, src);
+ }
+ } else {
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
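+
+// Illustrative summary of the lowerings above: int->byte is SEB, int->short
+// is SEH, int->char is ANDI with 0xFFFF, and int<->long is a single
+// "sll dst, src, 0" (32-bit operations on MIPS64 sign-extend their result
+// into bits 32-63). int->float/double stays inline (MTC1 plus CVT.S.W or
+// CVT.D.W), while long->FP and all FP->integral conversions go through the
+// pL2f/pL2d/pF2iz/pD2iz/pF2l/pD2l runtime entry points.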
+
+void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+} // namespace mips64
+} // namespace art
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
new file mode 100644
index 0000000..0ce0add
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
+
+#include "code_generator.h"
+#include "dex/compiler_enums.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/mips64/assembler_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+// Use a local definition to prevent copying mistakes.
+static constexpr size_t kMips64WordSize = kMips64PointerSize;
+
+
+// InvokeDexCallingConvention registers
+
+static constexpr GpuRegister kParameterCoreRegisters[] =
+ { A1, A2, A3, A4, A5, A6, A7 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FpuRegister kParameterFpuRegisters[] =
+ { F13, F14, F15, F16, F17, F18, F19 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+
+// InvokeRuntimeCallingConvention registers
+
+static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
+ { A0, A1, A2, A3, A4, A5, A6, A7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
+ { F12, F13, F14, F15, F16, F17, F18, F19 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+
+static constexpr GpuRegister kCoreCalleeSaves[] =
+ { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA }; // TODO: review
+static constexpr FpuRegister kFpuCalleeSaves[] =
+ { F24, F25, F26, F27, F28, F29, F30, F31 };
+
+
+class CodeGeneratorMIPS64;
+
+class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFpuRegisters,
+ kParameterFpuRegistersLength,
+ kMips64PointerSize) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitorMIPS64() {}
+ virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
+
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
+ Location GetReturnLocation(Primitive::Type type) const;
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
+};
+
+class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength,
+ kMips64PointerSize) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ void Exchange(int index1, int index2, bool double_slot);
+
+ Mips64Assembler* GetAssembler() const;
+
+ private:
+ CodeGeneratorMIPS64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
+};
+
+class SlowPathCodeMIPS64 : public SlowPathCode {
+ public:
+ SlowPathCodeMIPS64() : entry_label_(), exit_label_() {}
+
+ Label* GetEntryLabel() { return &entry_label_; }
+ Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ Label entry_label_;
+ Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
+};
+
+class LocationsBuilderMIPS64 : public HGraphVisitor {
+ public:
+ LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr);
+
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;
+
+ CodeGeneratorMIPS64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
+};
+
+class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr);
+
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ Mips64Assembler* GetAssembler() const { return assembler_; }
+
+ private:
+  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
+  void GenerateMemoryBarrier(MemBarrierKind kind);
+  // Generate code for the given suspend check. If not null, `successor`
+  // is the block to branch to if the suspend check is not needed, and after
+  // the suspend call.
+  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ void GenerateImplicitNullCheck(HNullCheck* instruction);
+ void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ Label* true_target,
+ Label* false_target,
+ Label* always_true_target);
+
+ Mips64Assembler* const assembler_;
+ CodeGeneratorMIPS64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
+};
+
+class CodeGeneratorMIPS64 : public CodeGenerator {
+ public:
+ CodeGeneratorMIPS64(HGraph* graph,
+ const Mips64InstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options);
+ virtual ~CodeGeneratorMIPS64() {}
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+
+ void Bind(HBasicBlock* block) OVERRIDE;
+
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+
+ size_t GetWordSize() const OVERRIDE { return kMips64WordSize; }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64WordSize; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return GetLabelOf(block)->Position();
+ }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ void MarkGCCard(GpuRegister object, GpuRegister value);
+
+ // Register allocation.
+
+ void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
+
+ const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const {
+ return isa_features_;
+ }
+
+ Label* GetLabelOf(HBasicBlock* block) const {
+ return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
+ }
+
+ void Initialize() OVERRIDE {
+ block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ }
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+ // Code generation helpers.
+
+ void MoveLocation(Location destination, Location source, Primitive::Type type);
+
+ void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
+
+ void LoadCurrentMethod(GpuRegister current_method);
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(int32_t offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+
+ bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, GpuRegister temp);
+
+ private:
+ // Labels for each block that will be compiled.
+ GrowableArray<Label> block_labels_;
+ Label frame_entry_label_;
+ LocationsBuilderMIPS64 location_builder_;
+ InstructionCodeGeneratorMIPS64 instruction_visitor_;
+ ParallelMoveResolverMIPS64 move_resolver_;
+ Mips64Assembler assembler_;
+ const Mips64InstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 4db3b43..4fbb51d 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -20,6 +20,8 @@
#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm/registers_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "arch/mips64/registers_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86/registers_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
@@ -27,6 +29,7 @@
#include "builder.h"
#include "code_generator_arm.h"
#include "code_generator_arm64.h"
+#include "code_generator_mips64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "common_compiler_test.h"
@@ -40,6 +43,7 @@
#include "ssa_liveness_analysis.h"
#include "utils.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
#include "gtest/gtest.h"
@@ -172,6 +176,14 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
if (kRuntimeISA == kArm64) {
Run(allocator, codegenARM64, has_result, expected);
}
+
+ std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
+ Mips64InstructionSetFeatures::FromCppDefines());
+ mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
+ codegenMIPS64.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kMips64) {
+ Run(allocator, codegenMIPS64, has_result, expected);
+ }
}
template <typename Expected>
@@ -222,6 +234,11 @@ static void RunCodeOptimized(HGraph* graph,
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kMips64) {
+ std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
+ Mips64InstructionSetFeatures::FromCppDefines());
+ mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
+ RunCodeOptimized(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b36d9b8..01ba110 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2109,8 +2109,12 @@ class HCompare : public HBinaryOperation {
kLtBias, // return -1 for NaN comparisons
};
- HCompare(Primitive::Type type, HInstruction* first, HInstruction* second, Bias bias)
- : HBinaryOperation(Primitive::kPrimInt, first, second), bias_(bias) {
+ HCompare(Primitive::Type type,
+ HInstruction* first,
+ HInstruction* second,
+ Bias bias,
+ uint32_t dex_pc)
+ : HBinaryOperation(Primitive::kPrimInt, first, second), bias_(bias), dex_pc_(dex_pc) {
DCHECK_EQ(type, first->GetType());
DCHECK_EQ(type, second->GetType());
}
@@ -2135,10 +2139,13 @@ class HCompare : public HBinaryOperation {
bool IsGtBias() { return bias_ == kGtBias; }
+ uint32_t GetDexPc() const { return dex_pc_; }
+
DECLARE_INSTRUCTION(Compare);
private:
const Bias bias_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
@@ -3862,6 +3869,8 @@ class MoveOperands : public ArenaObject<kArenaAllocMisc> {
return source_.IsInvalid();
}
+ Primitive::Type GetType() const { return type_; }
+
bool Is64BitMove() const {
return Primitive::Is64BitType(type_);
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d0d63a4..f1293b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -291,6 +291,7 @@ bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
return instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kMips64
|| instruction_set == kX86
|| instruction_set == kX86_64;
}
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5f439c8..8656ad5 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -80,6 +80,7 @@ bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UN
InstructionSet instruction_set) {
return instruction_set == kArm64
|| instruction_set == kX86_64
+ || instruction_set == kMips64
|| instruction_set == kArm
|| instruction_set == kX86
|| instruction_set == kThumb2;
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 5e49b93..3333cd2 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -25,9 +25,9 @@
namespace art {
namespace mips64 {
-void Mips64Assembler::Emit(int32_t value) {
+void Mips64Assembler::Emit(uint32_t value) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int32_t>(value);
+ buffer_.Emit<uint32_t>(value);
}
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
@@ -35,124 +35,62 @@ void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegis
CHECK_NE(rs, kNoGpuRegister);
CHECK_NE(rt, kNoGpuRegister);
CHECK_NE(rd, kNoGpuRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- static_cast<int32_t>(rd) << kRdShift |
- shamt << kShamtShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ static_cast<uint32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
Emit(encoding);
}
void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
CHECK_NE(rs, kNoGpuRegister);
CHECK_NE(rt, kNoGpuRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ imm;
Emit(encoding);
}
-void Mips64Assembler::EmitJ(int opcode, int address) {
- int32_t encoding = opcode << kOpcodeShift |
- address;
+void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
+ CHECK_NE(rs, kNoGpuRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ (imm21 & 0x1FFFFF);
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitJ(int opcode, uint32_t addr26) {
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ (addr26 & 0x3FFFFFF);
Emit(encoding);
}
void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
-int funct) {
+ int funct) {
CHECK_NE(ft, kNoFpuRegister);
CHECK_NE(fs, kNoFpuRegister);
CHECK_NE(fd, kNoFpuRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(ft) << kFtShift |
- static_cast<int32_t>(fs) << kFsShift |
- static_cast<int32_t>(fd) << kFdShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ static_cast<uint32_t>(fs) << kFsShift |
+ static_cast<uint32_t>(fd) << kFdShift |
+ funct;
Emit(encoding);
}
-void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm) {
- CHECK_NE(rt, kNoFpuRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) {
+ CHECK_NE(ft, kNoFpuRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ imm;
Emit(encoding);
}
-void Mips64Assembler::EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the branch instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (equal) {
- Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
- } else {
- Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
- }
-}
-
-void Mips64Assembler::EmitJump(Label* label, bool link) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the jump instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (link) {
- Jal((offset >> 2) & kJumpOffsetMask);
- } else {
- J((offset >> 2) & kJumpOffsetMask);
- }
-}
-
-int32_t Mips64Assembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
- CHECK_ALIGNED(offset, 4);
- CHECK(IsInt<POPCOUNT(kBranchOffsetMask)>(offset)) << offset;
-
- // Properly preserve only the bits supported in the instruction.
- offset >>= 2;
- if (is_jump) {
- offset &= kJumpOffsetMask;
- return (inst & ~kJumpOffsetMask) | offset;
- } else {
- offset &= kBranchOffsetMask;
- return (inst & ~kBranchOffsetMask) | offset;
- }
-}
-
-int Mips64Assembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
- // Sign-extend, then left-shift by 2.
- if (is_jump) {
- return (((inst & kJumpOffsetMask) << 6) >> 4);
- } else {
- return (((inst & kBranchOffsetMask) << 16) >> 14);
- }
-}
-
-void Mips64Assembler::Bind(Label* label, bool is_jump) {
- CHECK(!label->IsBound());
- int bound_pc = buffer_.Size();
- while (label->IsLinked()) {
- int32_t position = label->Position();
- int32_t next = buffer_.Load<int32_t>(position);
- int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
- int32_t encoded = Mips64Assembler::EncodeBranchOffset(offset, next, is_jump);
- buffer_.Store<int32_t>(position, encoded);
- label->position_ = Mips64Assembler::DecodeBranchOffset(next, is_jump);
- }
- label->BindTo(bound_pc);
-}
-
void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x20);
}
@@ -169,6 +107,10 @@ void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0x9, rs, rt, imm16);
}
+void Mips64Assembler::Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2d);
+}
+
void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0x19, rs, rt, imm16);
}
@@ -181,22 +123,90 @@ void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x23);
}
-void Mips64Assembler::Mult(GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2f);
+}
+
+void Mips64Assembler::MultR2(GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
}
-void Mips64Assembler::Multu(GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::MultuR2(GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
}
-void Mips64Assembler::Div(GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::DivR2(GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
}
-void Mips64Assembler::Divu(GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::DivuR2(GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
}
+void Mips64Assembler::MulR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0x1c, rs, rt, rd, 0, 2);
+}
+
+void Mips64Assembler::DivR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ DivR2(rs, rt);
+ Mflo(rd);
+}
+
+void Mips64Assembler::ModR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ DivR2(rs, rt);
+ Mfhi(rd);
+}
+
+void Mips64Assembler::DivuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ DivuR2(rs, rt);
+ Mflo(rd);
+}
+
+void Mips64Assembler::ModuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ DivuR2(rs, rt);
+ Mfhi(rd);
+}
+
+void Mips64Assembler::MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x18);
+}
+
+void Mips64Assembler::DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x1a);
+}
+
+void Mips64Assembler::ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 3, 0x1a);
+}
+
+void Mips64Assembler::DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x1b);
+}
+
+void Mips64Assembler::ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 3, 0x1b);
+}
+
+void Mips64Assembler::Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x1c);
+}
+
+void Mips64Assembler::Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x1e);
+}
+
+void Mips64Assembler::Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 3, 0x1e);
+}
+
+void Mips64Assembler::Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 2, 0x1f);
+}
+
+void Mips64Assembler::Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 3, 0x1f);
+}
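+
+// Note: the two-operand *R2 forms above deposit their results in HI/LO
+// (read back with MFHI/MFLO), MulR2 uses the SPECIAL2 MUL encoding, and the
+// R6/doubleword forms are MIPS64R6 three-operand encodings that select
+// mul/div/mod behavior through the shamt field (2 or 3, as emitted above).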
+
void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x24);
}
@@ -225,30 +235,80 @@ void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
EmitR(0, rs, rt, rd, 0, 0x27);
}
-void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rs, int shamt) {
- EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x00);
+void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
+ EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
}
-void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rs, int shamt) {
- EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x02);
+void Mips64Assembler::Seh(GpuRegister rd, GpuRegister rt) {
+ EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
}
-void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rs, int shamt) {
- EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x03);
+void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size_less_one) {
+ DCHECK(0 <= pos && pos < 32) << pos;
+ DCHECK(0 <= size_less_one && size_less_one < 32) << size_less_one;
+ EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size_less_one), pos, 3);
}
-void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
+}
+
+void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
+}
+
+void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
+}
+
+void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x04);
}
-void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x06);
}
-void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x07);
}
+void Mips64Assembler::Dsll(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x38);
+}
+
+void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
+}
+
+void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
+}
+
+void Mips64Assembler::Dsll32(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3c);
+}
+
+void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
+}
+
+void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
+}
+
+void Mips64Assembler::Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 0, 0x14);
+}
+
+void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 0, 0x16);
+}
+
+void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 0, 0x17);
+}
+
void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0x20, rs, rt, imm16);
}
@@ -281,6 +341,19 @@ void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}
+void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
+ EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
+}
+
+void Mips64Assembler::Dati(GpuRegister rs, uint16_t imm16) {
+ EmitI(1, rs, static_cast<GpuRegister>(0x1e), imm16);
+}
+
+void Mips64Assembler::Sync(uint32_t stype) {
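+  // The stype operand selects the barrier strength: 0 is a full completion
+  // barrier, while nonzero values request lighter ordering barriers.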
+ EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
+ static_cast<GpuRegister>(0), stype & 0x1f, 0xf);
+}
+
void Mips64Assembler::Mfhi(GpuRegister rd) {
EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
}
@@ -321,34 +394,121 @@ void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0xb, rs, rt, imm16);
}
-void Mips64Assembler::Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
EmitI(0x4, rs, rt, imm16);
Nop();
}
-void Mips64Assembler::Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
EmitI(0x5, rs, rt, imm16);
Nop();
}
-void Mips64Assembler::J(uint32_t address) {
- EmitJ(0x2, address);
+void Mips64Assembler::J(uint32_t addr26) {
+ EmitJ(0x2, addr26);
Nop();
}
-void Mips64Assembler::Jal(uint32_t address) {
- EmitJ(0x2, address);
+void Mips64Assembler::Jal(uint32_t addr26) {
+ EmitJ(0x3, addr26);
Nop();
}
-void Mips64Assembler::Jr(GpuRegister rs) {
- EmitR(0, rs, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), 0, 0x09); // Jalr zero, rs
+void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
Nop();
}
void Mips64Assembler::Jalr(GpuRegister rs) {
- EmitR(0, rs, static_cast<GpuRegister>(0), RA, 0, 0x09);
- Nop();
+ Jalr(RA, rs);
+}
+
+void Mips64Assembler::Jr(GpuRegister rs) {
+ Jalr(ZERO, rs);
+}
+
+void Mips64Assembler::Auipc(GpuRegister rs, uint16_t imm16) {
+ EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
+}
+
+void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
+ EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
+}
+
+void Mips64Assembler::Jialc(GpuRegister rt, uint16_t imm16) {
+ EmitI(0x3E, static_cast<GpuRegister>(0), rt, imm16);
+}
+
+void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x17, rs, rt, imm16);
+}
+
+void Mips64Assembler::Bltzc(GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, rt, rt, imm16);
+}
+
+void Mips64Assembler::Bgtzc(GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, static_cast<GpuRegister>(0), rt, imm16);
+}
+
+void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x16, rs, rt, imm16);
+}
+
+void Mips64Assembler::Bgezc(GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, rt, rt, imm16);
+}
+
+void Mips64Assembler::Blezc(GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, static_cast<GpuRegister>(0), rt, imm16);
+}
+
+void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x7, rs, rt, imm16);
+}
+
+void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x6, rs, rt, imm16);
+}
+
+void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
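+  // The compact BEQC encoding requires rs < rt; equality is symmetric, so
+  // the operands are swapped into canonical order below.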
+ EmitI(0x8, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
+}
+
+void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
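+  // As with BEQC, the BNEC encoding requires rs < rt; swap into canonical
+  // order below.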
+ EmitI(0x18, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
+}
+
+void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x36, rs, imm21);
+}
+
+void Mips64Assembler::Bnezc(GpuRegister rs, uint32_t imm21) {
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x3E, rs, imm21);
}
void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
@@ -368,23 +528,19 @@ void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
}
void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
- EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
- static_cast<FpuRegister>(fd), 0x0);
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}
void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
- EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
- static_cast<FpuRegister>(fd), 0x1);
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}
void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
- EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
- static_cast<FpuRegister>(fd), 0x2);
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}
void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
- EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
- static_cast<FpuRegister>(fd), 0x3);
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}
void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
@@ -392,16 +548,47 @@ void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
}
void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), static_cast<FpuRegister>(fs),
- static_cast<FpuRegister>(fd), 0x6);
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);
+}
+
+void Mips64Assembler::NegS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x7);
+}
+
+void Mips64Assembler::NegD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);
+}
+
+void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);
+}
+
+void Mips64Assembler::Cvtdw(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x21);
+}
+
+void Mips64Assembler::Cvtsd(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x20);
+}
+
+void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}
-void Mips64Assembler::Mtc1(FpuRegister ft, GpuRegister rs) {
- EmitFR(0x11, 0x04, ft, static_cast<FpuRegister>(rs), static_cast<FpuRegister>(0), 0x0);
+void Mips64Assembler::Mtc1(GpuRegister rt, FpuRegister fs) {
+ EmitFR(0x11, 0x04, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
+}
+
+void Mips64Assembler::Dmfc1(GpuRegister rt, FpuRegister fs) {
+ EmitFR(0x11, 0x01, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
+}
+
+void Mips64Assembler::Dmtc1(GpuRegister rt, FpuRegister fs) {
+ EmitFR(0x11, 0x05, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}
void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
@@ -430,45 +617,330 @@ void Mips64Assembler::Nop() {
static_cast<GpuRegister>(0), 0, 0x0);
}
-void Mips64Assembler::Move(GpuRegister rt, GpuRegister rs) {
- EmitI(0x19, rs, rt, 0); // Daddiu
+void Mips64Assembler::Move(GpuRegister rd, GpuRegister rs) {
+ Or(rd, rs, ZERO);
}
-void Mips64Assembler::Clear(GpuRegister rt) {
- EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rt, 0, 0x20);
+void Mips64Assembler::Clear(GpuRegister rd) {
+ Move(rd, ZERO);
}
-void Mips64Assembler::Not(GpuRegister rt, GpuRegister rs) {
- EmitR(0, static_cast<GpuRegister>(0), rs, rt, 0, 0x27);
+void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) {
+ Nor(rd, rs, ZERO);
}
-void Mips64Assembler::Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
- Mult(rs, rt);
- Mflo(rd);
+void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
+ if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ Addiu(rd, ZERO, value);
+ } else {
+ Lui(rd, value >> 16);
+ if (value & 0xFFFF)
+ Ori(rd, rd, value);
+ }
}
-void Mips64Assembler::Div(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
- Div(rs, rt);
- Mflo(rd);
+void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+ Daddiu(rd, ZERO, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+ Lui(rd, value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+ Lui(rd, value >> 16);
+ Ori(rd, rd, value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ Ori(rd, ZERO, value);
+ Dahi(rd, value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ Ori(rd, ZERO, value);
+ Dati(rd, value >> 48);
+ } else if ((value & 0xFFFF) == 0 &&
+ (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
+ Lui(rd, value >> 16);
+ Dahi(rd, (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ Lui(rd, value >> 16);
+ Dati(rd, (value >> 48) + bit31);
+ } else {
+ int shift_cnt = CTZ(value);
+ int64_t tmp = value >> shift_cnt;
+ if (IsUint<16>(tmp)) {
+ Ori(rd, ZERO, tmp);
+ if (shift_cnt < 32)
+ Dsll(rd, rd, shift_cnt);
+ else
+ Dsll32(rd, rd, shift_cnt & 31);
+ } else if (IsInt<16>(tmp)) {
+ Daddiu(rd, ZERO, tmp);
+ if (shift_cnt < 32)
+ Dsll(rd, rd, shift_cnt);
+ else
+ Dsll32(rd, rd, shift_cnt & 31);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ Lui(rd, tmp >> 16);
+ Ori(rd, rd, tmp);
+ if (shift_cnt < 32)
+ Dsll(rd, rd, shift_cnt);
+ else
+ Dsll32(rd, rd, shift_cnt & 31);
+ } else {
+ shift_cnt = 16 + CTZ(value >> 16);
+ tmp = value >> shift_cnt;
+ if (IsUint<16>(tmp)) {
+ Ori(rd, ZERO, tmp);
+ if (shift_cnt < 32)
+ Dsll(rd, rd, shift_cnt);
+ else
+ Dsll32(rd, rd, shift_cnt & 31);
+ Ori(rd, rd, value);
+ } else if (IsInt<16>(tmp)) {
+ Daddiu(rd, ZERO, tmp);
+ if (shift_cnt < 32)
+ Dsll(rd, rd, shift_cnt);
+ else
+ Dsll32(rd, rd, shift_cnt & 31);
+ Ori(rd, rd, value);
+ } else {
+ // Loads with 3-4 instructions.
+ uint64_t tmp2 = value;
+ bool used_lui = false;
+ if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+ Lui(rd, tmp2 >> 16);
+ used_lui = true;
+ }
+ if ((tmp2 & 0xFFFF) != 0) {
+ if (used_lui)
+ Ori(rd, rd, tmp2);
+ else
+ Ori(rd, ZERO, tmp2);
+ }
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ Dahi(rd, tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ Dati(rd, tmp2 >> 48);
+ }
+ }
+ }
+ }
}
-void Mips64Assembler::Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
- Div(rs, rt);
- Mfhi(rd);
+void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value, GpuRegister rtmp) {
+ if (IsInt<16>(value)) {
+ Addiu(rt, rs, value);
+ } else {
+ LoadConst32(rtmp, value);
+ Addu(rt, rs, rtmp);
+ }
}
-void Mips64Assembler::AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value) {
- CHECK((value >= -32768) && (value <= 32766));
- Daddiu(rt, rs, value);
+void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
+ if (IsInt<16>(value)) {
+ Daddiu(rt, rs, value);
+ } else {
+ LoadConst64(rtmp, value);
+ Daddu(rt, rs, rtmp);
+ }
}
-void Mips64Assembler::LoadImmediate64(GpuRegister rt, int32_t value) {
- CHECK((value >= -32768) && (value <= 32766));
- Daddiu(rt, ZERO, value);
+//
+// MIPS64R6 branches
+//
+//
+// Unconditional (pc + 32-bit signed offset):
+//
+// auipc at, ofs_high
+// jic at, ofs_low
+// // no delay/forbidden slot
+//
+//
+// Conditional (pc + 32-bit signed offset):
+//
+// b<cond>c reg, +2 // skip next 2 instructions
+// auipc at, ofs_high
+// jic at, ofs_low
+// // no delay/forbidden slot
+//
+//
+// Unconditional (pc + 32-bit signed offset) and link:
+//
+// auipc reg, ofs_high
+// daddiu reg, ofs_low
+// jialc reg, 0
+// // no delay/forbidden slot
+//
+//
+// TODO: use shorter instruction sequences whenever possible.
+//
+
+void Mips64Assembler::Bind(Label* label) {
+ CHECK(!label->IsBound());
+ int32_t bound_pc = buffer_.Size();
+
+  // Walk the list of branches (auipc + jic pairs) that refer to and precede this label.
+ // Embed the previously unknown pc-relative addresses in them.
+ while (label->IsLinked()) {
+ int32_t position = label->Position();
+ // Extract the branch (instruction pair)
+ uint32_t auipc = buffer_.Load<uint32_t>(position);
+ uint32_t jic = buffer_.Load<uint32_t>(position + 4); // actually, jic or daddiu
+
+ // Extract the location of the previous pair in the list (walking the list backwards;
+ // the previous pair location was stored in the immediate operands of the instructions)
+ int32_t prev = (auipc << 16) | (jic & 0xFFFF);
+
+ // Get the pc-relative address
+ uint32_t offset = bound_pc - position;
+ offset += (offset & 0x8000) << 1; // account for sign extension in jic/daddiu
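+    // (Jic/daddiu sign-extend their 16-bit immediate; when bit 15 of the
+    // offset is set, pre-adding 0x10000 here makes the auipc high half
+    // compensate for that subtraction.)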
+
+ // Embed it in the two instructions
+ auipc = (auipc & 0xFFFF0000) | (offset >> 16);
+ jic = (jic & 0xFFFF0000) | (offset & 0xFFFF);
+
+ // Save the adjusted instructions
+ buffer_.Store<uint32_t>(position, auipc);
+ buffer_.Store<uint32_t>(position + 4, jic);
+
+ // On to the previous branch in the list...
+ label->position_ = prev;
+ }
+
+ // Now make the label object contain its own location
+ // (it will be used by the branches referring to and following this label)
+ label->BindTo(bound_pc);
+}
+
+void Mips64Assembler::B(Label* label) {
+ if (label->IsBound()) {
+ // Branch backwards (to a preceding label), distance is known
+ uint32_t offset = label->Position() - buffer_.Size();
+ CHECK_LE(static_cast<int32_t>(offset), 0);
+ offset += (offset & 0x8000) << 1; // account for sign extension in jic
+ Auipc(AT, offset >> 16);
+ Jic(AT, offset);
+ } else {
+ // Branch forward (to a following label), distance is unknown
+ int32_t position = buffer_.Size();
+ // The first branch forward will have 0 in its pc-relative address (copied from label's
+ // position). It will be the terminator of the list of forward-reaching branches.
+ uint32_t prev = label->position_;
+ Auipc(AT, prev >> 16);
+ Jic(AT, prev);
+ // Now make the link object point to the location of this branch
+ // (this forms a linked list of branches preceding this label)
+ label->LinkTo(position);
+ }
+}
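+
+// Forward references form a chain: each unbound branch stores the previous
+// link in the 32-bit immediate fields of its auipc/jic pair, the label
+// records the most recent branch, and Bind() walks the chain back to its
+// terminator, patching each pair with the now-known offset.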
+
+void Mips64Assembler::Jalr(Label* label, GpuRegister indirect_reg) {
+ if (label->IsBound()) {
+ // Branch backwards (to a preceding label), distance is known
+ uint32_t offset = label->Position() - buffer_.Size();
+ CHECK_LE(static_cast<int32_t>(offset), 0);
+ offset += (offset & 0x8000) << 1; // account for sign extension in daddiu
+ Auipc(indirect_reg, offset >> 16);
+ Daddiu(indirect_reg, indirect_reg, offset);
+ Jialc(indirect_reg, 0);
+ } else {
+ // Branch forward (to a following label), distance is unknown
+ int32_t position = buffer_.Size();
+ // The first branch forward will have 0 in its pc-relative address (copied from label's
+ // position). It will be the terminator of the list of forward-reaching branches.
+ uint32_t prev = label->position_;
+ Auipc(indirect_reg, prev >> 16);
+ Daddiu(indirect_reg, indirect_reg, prev);
+ Jialc(indirect_reg, 0);
+ // Now make the link object point to the location of this branch
+ // (this forms a linked list of branches preceding this label)
+ label->LinkTo(position);
+ }
+}
+
+void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Label* label) {
+ Bgec(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bltzc(GpuRegister rt, Label* label) {
+ Bgezc(rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bgtzc(GpuRegister rt, Label* label) {
+ Blezc(rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Label* label) {
+ Bltc(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bgezc(GpuRegister rt, Label* label) {
+ Bltzc(rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Blezc(GpuRegister rt, Label* label) {
+ Bgtzc(rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Label* label) {
+ Bgeuc(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Label* label) {
+ Bltuc(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Label* label) {
+ Bnec(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Label* label) {
+ Beqc(rs, rt, 2);
+ B(label);
+}
+
+void Mips64Assembler::Beqzc(GpuRegister rs, Label* label) {
+ Bnezc(rs, 2);
+ B(label);
+}
+
+void Mips64Assembler::Bnezc(GpuRegister rs, Label* label) {
+ Beqzc(rs, 2);
+ B(label);
}
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
int32_t offset) {
+ if (!IsInt<16>(offset)) {
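+    // The offset does not fit in the 16-bit immediate field: materialize it
+    // in AT (the assembler temporary) and address relative to AT instead.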
+ LoadConst32(AT, offset);
+ Daddu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -489,23 +961,25 @@ void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuR
Lwu(reg, base, offset);
break;
case kLoadDoubleword:
- // TODO: alignment issues ???
Ld(reg, base, offset);
break;
- default:
- LOG(FATAL) << "UNREACHABLE";
}
}
void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
int32_t offset) {
- CHECK((offset >= -32768) && (offset <= 32766));
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Daddu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kLoadWord:
Lwc1(reg, base, offset);
break;
case kLoadDoubleword:
- // TODO: alignment issues ???
Ldc1(reg, base, offset);
break;
default:
@@ -542,6 +1016,13 @@ void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register,
void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Daddu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -553,7 +1034,6 @@ void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuR
Sw(reg, base, offset);
break;
case kStoreDoubleword:
- // TODO: alignment issues ???
Sd(reg, base, offset);
break;
default:
@@ -563,6 +1043,13 @@ void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuR
void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Daddu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kStoreWord:
Swc1(reg, base, offset);
@@ -613,10 +1100,12 @@ void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// only increment stack offset.
offset += size;
} else if (reg.IsFpuRegister()) {
- StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsFpuRegister(), SP, offset);
+ StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
+ reg.AsFpuRegister(), SP, offset);
offset += size;
} else if (reg.IsGpuRegister()) {
- StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsGpuRegister(), SP, offset);
+ StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
+ reg.AsGpuRegister(), SP, offset);
offset += size;
}
}
@@ -650,14 +1139,14 @@ void Mips64Assembler::RemoveFrame(size_t frame_size,
}
void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant64(SP, SP, -adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
cfi_.AdjustCFAOffset(adjust);
}
void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant64(SP, SP, adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Daddiu64(SP, SP, static_cast<int32_t>(adjust));
cfi_.AdjustCFAOffset(-adjust);
}
@@ -702,7 +1191,7 @@ void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
- LoadImmediate64(scratch.AsGpuRegister(), imm);
+ LoadConst32(scratch.AsGpuRegister(), imm);
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
@@ -710,7 +1199,9 @@ void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t im
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
- LoadImmediate64(scratch.AsGpuRegister(), imm);
+  // TODO: it's unclear whether 32 or 64 bits need to be stored (Arm64 and x86/x64 disagree?).
+ // Is this function even referenced anywhere else in the code?
+ LoadConst32(scratch.AsGpuRegister(), imm);
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
}
@@ -719,7 +1210,7 @@ void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
- AddConstant64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
+ Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
@@ -757,20 +1248,24 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, Membe
LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
if (kPoisonHeapReferences && poison_reference) {
- Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
+ // TODO: review
+ // Negate the 32-bit ref
+ Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
+ // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64
+ Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 31);
}
}
void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
+ Offset offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
- CHECK(dest.IsGpuRegister() && dest.IsGpuRegister()) << dest;
+ CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
- ThreadOffset<8> offs) {
+ ThreadOffset<8> offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -849,7 +1344,7 @@ void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
}
void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
+ ManagedRegister mscratch, size_t size) {
GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
CHECK(size == 4 || size == 8) << size;
if (size == 4) {
@@ -866,7 +1361,7 @@ void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset sr
}
void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
+ ManagedRegister mscratch, size_t size) {
GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
CHECK(size == 4 || size == 8) << size;
if (size == 4) {
@@ -888,8 +1383,8 @@ void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offse
}
void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
CHECK(size == 4 || size == 8) << size;
if (size == 4) {
@@ -912,12 +1407,14 @@ void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOf
}
void Mips64Assembler::MemoryBarrier(ManagedRegister) {
+ // TODO: sync?
UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}
void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
Mips64ManagedRegister out_reg = mout_reg.AsMips64();
Mips64ManagedRegister in_reg = min_reg.AsMips64();
CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
@@ -933,20 +1430,20 @@ void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
- LoadImmediate64(out_reg.AsGpuRegister(), 0);
+ LoadConst32(out_reg.AsGpuRegister(), 0);
}
- EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
- AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ Beqzc(in_reg.AsGpuRegister(), &null_arg);
+ Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
}
}
void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
if (null_allowed) {
@@ -956,30 +1453,30 @@ void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
// e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- EmitBranch(scratch.AsGpuRegister(), ZERO, &null_arg, true);
- AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ Beqzc(scratch.AsGpuRegister(), &null_arg);
+ Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
}
// Given a handle scope entry, load the associated reference.
void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
+ ManagedRegister min_reg) {
Mips64ManagedRegister out_reg = mout_reg.AsMips64();
Mips64ManagedRegister in_reg = min_reg.AsMips64();
CHECK(out_reg.IsGpuRegister()) << out_reg;
CHECK(in_reg.IsGpuRegister()) << in_reg;
Label null_arg;
if (!out_reg.Equals(in_reg)) {
- LoadImmediate64(out_reg.AsGpuRegister(), 0);
+ LoadConst32(out_reg.AsGpuRegister(), 0);
}
- EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
+ Beqzc(in_reg.AsGpuRegister(), &null_arg);
LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
in_reg.AsGpuRegister(), 0);
- Bind(&null_arg, false);
+ Bind(&null_arg);
}
void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
@@ -1022,7 +1519,7 @@ void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
}
void Mips64Assembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*mscratch*/) {
+ ManagedRegister /*mscratch*/) {
StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}
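Both CreateHandleScopeEntry variants implement the ternary spelled out in the comment above. A hedged sketch of the value they compute, with illustrative names only:

#include <cstdint>

// scratch = (ref == 0) ? 0 : (SP + handle_scope_offset); Beqzc skips the
// Daddiu64 for a null reference, leaving the preloaded 0 in place.
uint64_t HandleScopeEntry(uint64_t ref, uint64_t sp, int32_t handle_scope_offset) {
  return (ref == 0) ? 0 : sp + static_cast<uint64_t>(handle_scope_offset);
}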
@@ -1032,13 +1529,13 @@ void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjus
buffer_.EnqueueSlowPath(slow);
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
S1, Thread::ExceptionOffset<8>().Int32Value());
- EmitBranch(scratch.AsGpuRegister(), ZERO, slow->Entry(), false);
+ Bnezc(scratch.AsGpuRegister(), slow->Entry());
}
void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
#define __ sp_asm->
- __ Bind(&entry_, false);
+ __ Bind(&entry_);
if (stack_adjust_ != 0) { // Fix up the frame.
__ DecreaseFrameSize(stack_adjust_);
}
@@ -1048,6 +1545,7 @@ void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
// Set up call to Thread::Current()->pDeliverException
__ LoadFromOffset(kLoadDoubleword, T9, S1,
QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+ // TODO: check T9 usage
__ Jr(T9);
// Call never returns
__ Break();
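At runtime the emitted poll reduces to a null check on the thread-local exception slot followed by a non-returning call. A semantic sketch under assumed names (this Thread struct and DeliverException are placeholders, not the ART declarations):

#include <cstdlib>

struct Thread {
  void* exception;  // stands in for the slot at Thread::ExceptionOffset<8>()
};

[[noreturn]] void DeliverException(Thread* /*self*/) {  // placeholder for pDeliverException
  std::abort();  // the real entrypoint unwinds to a catch handler instead
}

void ExceptionPollSemantics(Thread* self) {
  if (self->exception != nullptr) {  // LoadFromOffset + Bnezc to the slow path
    DeliverException(self);          // loaded into T9 and reached via Jr T9; never returns
  }
}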
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 38419ab..88cc4bc 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -56,13 +56,31 @@ class Mips64Assembler FINAL : public Assembler {
void Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
+ void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Mult(GpuRegister rs, GpuRegister rt);
- void Multu(GpuRegister rs, GpuRegister rt);
- void Div(GpuRegister rs, GpuRegister rt);
- void Divu(GpuRegister rs, GpuRegister rt);
+ void Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
+
+ void MultR2(GpuRegister rs, GpuRegister rt); // R2
+ void MultuR2(GpuRegister rs, GpuRegister rt); // R2
+ void DivR2(GpuRegister rs, GpuRegister rt); // R2
+ void DivuR2(GpuRegister rs, GpuRegister rt); // R2
+ void MulR2(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R2
+ void DivR2(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R2
+ void ModR2(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R2
+ void DivuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R2
+ void ModuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R2
+ void MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R6
+ void DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R6
+ void ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R6
+ void DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R6
+ void ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt); // R6
+ void Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64 R6
+ void Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64 R6
+ void Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64 R6
+ void Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64 R6
+ void Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64 R6
void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -72,40 +90,72 @@ class Mips64Assembler FINAL : public Assembler {
void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Sll(GpuRegister rd, GpuRegister rs, int shamt);
- void Srl(GpuRegister rd, GpuRegister rs, int shamt);
- void Sra(GpuRegister rd, GpuRegister rs, int shamt);
- void Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Seb(GpuRegister rd, GpuRegister rt); // R2+
+ void Seh(GpuRegister rd, GpuRegister rt); // R2+
+ void Dext(GpuRegister rs, GpuRegister rt, int pos, int size_less_one); // MIPS64
+
+ void Sll(GpuRegister rd, GpuRegister rt, int shamt);
+ void Srl(GpuRegister rd, GpuRegister rt, int shamt);
+ void Sra(GpuRegister rd, GpuRegister rt, int shamt);
+ void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+ void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+ void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+ void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
+ void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
+ void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
- void Mfhi(GpuRegister rd);
- void Mflo(GpuRegister rd);
+ void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64 R6
+ void Dati(GpuRegister rs, uint16_t imm16); // MIPS64 R6
+ void Sync(uint32_t stype);
+ void Mfhi(GpuRegister rd); // R2
+ void Mflo(GpuRegister rd); // R2
void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16);
- void J(uint32_t address);
- void Jal(uint32_t address);
- void Jr(GpuRegister rs);
+ void Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16);
+ void Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16);
+ void J(uint32_t addr26);
+ void Jal(uint32_t addr26);
+ void Jalr(GpuRegister rd, GpuRegister rs);
void Jalr(GpuRegister rs);
+ void Jr(GpuRegister rs);
+ void Auipc(GpuRegister rs, uint16_t imm16); // R6
+ void Jic(GpuRegister rt, uint16_t imm16); // R6
+ void Jialc(GpuRegister rt, uint16_t imm16); // R6
+ void Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Bltzc(GpuRegister rt, uint16_t imm16); // R6
+ void Bgtzc(GpuRegister rt, uint16_t imm16); // R6
+ void Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Bgezc(GpuRegister rt, uint16_t imm16); // R6
+ void Blezc(GpuRegister rt, uint16_t imm16); // R6
+ void Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16); // R6
+ void Beqzc(GpuRegister rs, uint32_t imm21); // R6
+ void Bnezc(GpuRegister rs, uint32_t imm21); // R6
void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
@@ -117,9 +167,18 @@ class Mips64Assembler FINAL : public Assembler {
void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void MovS(FpuRegister fd, FpuRegister fs);
void MovD(FpuRegister fd, FpuRegister fs);
+ void NegS(FpuRegister fd, FpuRegister fs);
+ void NegD(FpuRegister fd, FpuRegister fs);
+
+ void Cvtsw(FpuRegister fd, FpuRegister fs);
+ void Cvtdw(FpuRegister fd, FpuRegister fs);
+ void Cvtsd(FpuRegister fd, FpuRegister fs);
+ void Cvtds(FpuRegister fd, FpuRegister fs);
void Mfc1(GpuRegister rt, FpuRegister fs);
- void Mtc1(FpuRegister ft, GpuRegister rs);
+ void Mtc1(GpuRegister rt, FpuRegister fs);
+ void Dmfc1(GpuRegister rt, FpuRegister fs); // MIPS64
+ void Dmtc1(GpuRegister rt, FpuRegister fs); // MIPS64
void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
@@ -127,15 +186,33 @@ class Mips64Assembler FINAL : public Assembler {
void Break();
void Nop();
- void Move(GpuRegister rt, GpuRegister rs);
- void Clear(GpuRegister rt);
- void Not(GpuRegister rt, GpuRegister rs);
- void Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Div(GpuRegister rd, GpuRegister rs, GpuRegister rt);
- void Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-
- void AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value);
- void LoadImmediate64(GpuRegister rt, int32_t value);
+ void Move(GpuRegister rd, GpuRegister rs);
+ void Clear(GpuRegister rd);
+ void Not(GpuRegister rd, GpuRegister rs);
+
+ // Higher-level composite instructions.
+ void LoadConst32(GpuRegister rd, int32_t value);
+ void LoadConst64(GpuRegister rd, int64_t value); // MIPS64
+
+ void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value, GpuRegister rtmp = AT);
+ void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
+
+ void Bind(Label* label); // R6
+ void B(Label* label); // R6
+ void Jalr(Label* label, GpuRegister indirect_reg = RA); // R6
+ // TODO: implement a common conditional-branch interface covering both R6 and non-R6?
+ void Bltc(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Bltzc(GpuRegister rt, Label* label); // R6
+ void Bgtzc(GpuRegister rt, Label* label); // R6
+ void Bgec(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Bgezc(GpuRegister rt, Label* label); // R6
+ void Blezc(GpuRegister rt, Label* label); // R6
+ void Bltuc(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Bgeuc(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Beqc(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Bnec(GpuRegister rs, GpuRegister rt, Label* label); // R6
+ void Beqzc(GpuRegister rs, Label* label); // R6
+ void Bnezc(GpuRegister rs, Label* label); // R6
void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
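For LoadConst64 above, one plausible decomposition splits the 64-bit value into the four 16-bit immediates that lui/ori/dahi/dati consume. The sketch below ignores the sign-extension carries between chunks, which a real implementation has to compensate for, so treat it as an outline rather than the actual algorithm:

#include <cstdint>
#include <cstdio>

void SplitConst64(int64_t value) {
  uint16_t lo16 = static_cast<uint16_t>(value);        // ori
  uint16_t hi16 = static_cast<uint16_t>(value >> 16);  // lui
  uint16_t ahi  = static_cast<uint16_t>(value >> 32);  // dahi (carries ignored)
  uint16_t ati  = static_cast<uint16_t>(value >> 48);  // dati (carries ignored)
  std::printf("lui %04x / ori %04x / dahi %04x / dati %04x\n", hi16, lo16, ahi, ati);
}

int main() {
  SplitConst64(INT64_C(0x123456789ABCDEF0));
  return 0;
}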
@@ -144,10 +221,7 @@ class Mips64Assembler FINAL : public Assembler {
void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
// Emit data (e.g. encoded instruction or immediate) to the instruction stream.
- void Emit(int32_t value);
- void EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal);
- void EmitJump(Label* label, bool link);
- void Bind(Label* label, bool is_jump);
+ void Emit(uint32_t value);
//
// Overridden common assembler high-level functionality
@@ -269,13 +343,11 @@ class Mips64Assembler FINAL : public Assembler {
private:
void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
- void EmitJ(int opcode, int address);
+ void EmitI21(int opcode, GpuRegister rs, uint32_t imm21);
+ void EmitJ(int opcode, uint32_t addr26);
void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
- int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
- int DecodeBranchOffset(int32_t inst, bool is_jump);
-
DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
};
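The new EmitI21 emitter presumably packs the I21 format used by the compact branches (Beqzc/Bnezc): a 6-bit opcode, a 5-bit rs field, and a 21-bit immediate. A sketch of that packing, with the field layout taken from the published MIPS R6 encoding rather than from this patch:

#include <cstdint>

uint32_t EncodeI21(uint32_t opcode, uint32_t rs, uint32_t imm21) {
  return (opcode << 26) | ((rs & 0x1Fu) << 21) | (imm21 & 0x1FFFFFu);
}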
diff --git a/compiler/utils/mips64/constants_mips64.h b/compiler/utils/mips64/constants_mips64.h
index 8b7697c..f57498d 100644
--- a/compiler/utils/mips64/constants_mips64.h
+++ b/compiler/utils/mips64/constants_mips64.h
@@ -67,7 +67,7 @@ class Instr {
static const uint32_t kBreakPointInstruction = 0x0000000D;
bool IsBreakPoint() {
- return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC0000CF) == kBreakPointInstruction;
+ return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC00003F) == kBreakPointInstruction;
}
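The corrected mask keeps only the opcode field (bits 31..26) and the function field (bits 5..0), which is what actually identifies BREAK; the old mask also tested part of the 20-bit code field and would reject a BREAK carrying a non-zero code. A quick illustrative check:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBreakPointInstruction = 0x0000000D;   // SPECIAL opcode, funct 0b001101
  const uint32_t kMask = 0xFC00003F;                    // opcode and funct fields only
  const uint32_t break_with_code = (0x7u << 6) | 0x0Du; // BREAK with code field = 7
  assert((break_with_code & kMask) == kBreakPointInstruction);
  return 0;
}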
// Instructions are read out of a code stream. The only way to get a