author     Alexandre Rames <alexandre.rames@arm.com>    2015-01-29 16:57:31 +0000
committer  Alexandre Rames <alexandre.rames@arm.com>    2015-01-29 16:57:31 +0000
commit     542361f6e9ff05e3ca1f56c94c88bc3efeddd9c4 (patch)
tree       f9c914c1ca168d1c93148b95bec7f3e8b0659542
parent     c9ff6b112d25657128f9a7251e253b1382b0f1b9 (diff)
Introduce primitive type helpers.
Change-Id: I81e909a185787f109c0afafa27b4335050a0dcdf
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  56
-rw-r--r--  compiler/optimizing/common_arm64.h            36
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc        3
-rw-r--r--  runtime/primitive.h                           21
4 files changed, 58 insertions, 58 deletions
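
The commit moves the IsFPType, IsIntegralType, and Is64BitType helpers out of the arm64-specific compiler/optimizing/common_arm64.h and into runtime/primitive.h as Primitive::IsFloatingPointType, Primitive::IsIntegralType, and Primitive::Is64BitType, then switches every arm64 call site over. As a rough, self-contained sketch (a simplified stand-in for the real Primitive class, not the full runtime/primitive.h), the relocated helpers and a typical call site look like this:

    #include <iostream>

    // Simplified stand-in for the Primitive class in runtime/primitive.h;
    // only the members touched by this change are shown.
    class Primitive {
     public:
      enum Type {
        kPrimNot, kPrimBoolean, kPrimByte, kPrimChar, kPrimShort,
        kPrimInt, kPrimLong, kPrimFloat, kPrimDouble, kPrimVoid,
      };

      static bool IsFloatingPointType(Type type) {
        return type == kPrimFloat || type == kPrimDouble;
      }

      static bool IsIntegralType(Type type) {
        switch (type) {
          case kPrimByte:
          case kPrimChar:
          case kPrimShort:
          case kPrimInt:
          case kPrimLong:
            return true;
          default:
            return false;
        }
      }

      static bool Is64BitType(Type type) {
        return type == kPrimLong || type == kPrimDouble;
      }
    };

    int main() {
      // Call sites change from helpers::IsFPType(type) to
      // Primitive::IsFloatingPointType(type), and similarly for the others.
      std::cout << Primitive::IsFloatingPointType(Primitive::kPrimDouble) << "\n";  // 1
      std::cout << Primitive::IsIntegralType(Primitive::kPrimChar) << "\n";         // 1
      std::cout << Primitive::Is64BitType(Primitive::kPrimFloat) << "\n";           // 0
      return 0;
    }

The practical effect is that the predicates now live on the shared Primitive class instead of the arm64 helpers namespace, so any backend can use them.
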
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0909424..430baf6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -52,9 +52,6 @@ using helpers::InputFPRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputOperandAt;
using helpers::Int64ConstantFrom;
-using helpers::Is64BitType;
-using helpers::IsFPType;
-using helpers::IsIntegralType;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
using helpers::OutputCPURegister;
@@ -383,18 +380,20 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
LOG(FATAL) << "Unreachable type " << type;
}
- if (IsFPType(type) && (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ if (Primitive::IsFloatingPointType(type) &&
+ (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
- } else if (!IsFPType(type) && (gp_index_ < calling_convention.GetNumberOfRegisters())) {
+ } else if (!Primitive::IsFloatingPointType(type) &&
+ (gp_index_ < calling_convention.GetNumberOfRegisters())) {
next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
} else {
size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
- next_location = Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
- : Location::StackSlot(stack_offset);
+ next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
}
// Space on the stack is reserved for all arguments.
- stack_index_ += Is64BitType(type) ? 2 : 1;
+ stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
return next_location;
}
@@ -507,7 +506,7 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
MoveLocation(location, temp_location, type);
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- if (Is64BitType(type)) {
+ if (Primitive::Is64BitType(type)) {
MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
} else {
MoveLocation(location, Location::StackSlot(stack_slot), type);
@@ -583,7 +582,7 @@ Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
LOG(FATAL) << "Unreachable type " << type;
}
- if (IsFPType(type)) {
+ if (Primitive::IsFloatingPointType(type)) {
ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
DCHECK_NE(reg, -1);
return Location::FpuRegisterLocation(reg);
@@ -675,8 +674,8 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
- (destination.IsRegister() && !IsFPType(type)));
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
CPURegister dst = CPURegisterFrom(destination, type);
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
@@ -702,8 +701,8 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
- (source.IsFpuRegister() == IsFPType(type)));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
__ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
DCHECK(unspecified_type || CoherentConstantAndType(source, type));
@@ -816,7 +815,7 @@ void CodeGeneratorARM64::Load(Primitive::Type type,
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
__ Ldr(dst, src);
break;
case Primitive::kPrimVoid:
@@ -859,14 +858,14 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
__ Ldar(Register(dst), base);
MaybeRecordImplicitNullCheck(instruction);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
DCHECK(dst.IsFPRegister());
- DCHECK_EQ(dst.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
__ Ldar(temp, base);
@@ -896,7 +895,7 @@ void CodeGeneratorARM64::Store(Primitive::Type type,
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
__ Str(src, dst);
break;
case Primitive::kPrimVoid:
@@ -929,13 +928,13 @@ void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
__ Stlr(Register(src), base);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
DCHECK(src.IsFPRegister());
- DCHECK_EQ(src.Is64Bits(), Is64BitType(type));
+ DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
__ Fmov(temp, FPRegister(src));
@@ -2243,8 +2242,8 @@ void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
void LocationsBuilderARM64::VisitRem(HRem* rem) {
Primitive::Type type = rem->GetResultType();
- LocationSummary::CallKind call_kind = IsFPType(type) ? LocationSummary::kCall
- : LocationSummary::kNoCall;
+ LocationSummary::CallKind call_kind =
+ Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
switch (type) {
@@ -2479,13 +2478,13 @@ void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
}
- if (IsFPType(input_type)) {
+ if (Primitive::IsFloatingPointType(input_type)) {
locations->SetInAt(0, Location::RequiresFpuRegister());
} else {
locations->SetInAt(0, Location::RequiresRegister());
}
- if (IsFPType(result_type)) {
+ if (Primitive::IsFloatingPointType(result_type)) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2498,7 +2497,7 @@ void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* convers
DCHECK_NE(input_type, result_type);
- if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
+ if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
int result_size = Primitive::ComponentSize(result_type);
int input_size = Primitive::ComponentSize(input_type);
int min_size = std::min(result_size, input_size);
@@ -2512,12 +2511,13 @@ void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* convers
} else {
__ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
}
- } else if (IsFPType(result_type) && IsIntegralType(input_type)) {
+ } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
__ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
- } else if (IsIntegralType(result_type) && IsFPType(input_type)) {
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
__ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
- } else if (IsFPType(result_type) && IsFPType(input_type)) {
+ } else if (Primitive::IsFloatingPointType(result_type) &&
+ Primitive::IsFloatingPointType(input_type)) {
__ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
} else {
LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 7077f98..007324e 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -27,27 +27,6 @@ namespace art {
namespace arm64 {
namespace helpers {
-constexpr bool IsFPType(Primitive::Type type) {
- return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
-}
-
-static inline bool IsIntegralType(Primitive::Type type) {
- switch (type) {
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimLong:
- return true;
- default:
- return false;
- }
-}
-
-constexpr bool Is64BitType(Primitive::Type type) {
- return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
-}
-
// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
"Unexpected values for register codes.");
@@ -83,7 +62,7 @@ static inline vixl::Register WRegisterFrom(Location location) {
}
static inline vixl::Register RegisterFrom(Location location, Primitive::Type type) {
- DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
+ DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type));
return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}
@@ -107,7 +86,7 @@ static inline vixl::FPRegister SRegisterFrom(Location location) {
}
static inline vixl::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
- DCHECK(IsFPType(type));
+ DCHECK(Primitive::IsFloatingPointType(type));
return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}
@@ -121,17 +100,18 @@ static inline vixl::FPRegister InputFPRegisterAt(HInstruction* instr, int input_
}
static inline vixl::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
- return IsFPType(type) ? vixl::CPURegister(FPRegisterFrom(location, type))
- : vixl::CPURegister(RegisterFrom(location, type));
+ return Primitive::IsFloatingPointType(type) ? vixl::CPURegister(FPRegisterFrom(location, type))
+ : vixl::CPURegister(RegisterFrom(location, type));
}
static inline vixl::CPURegister OutputCPURegister(HInstruction* instr) {
- return IsFPType(instr->GetType()) ? static_cast<vixl::CPURegister>(OutputFPRegister(instr))
- : static_cast<vixl::CPURegister>(OutputRegister(instr));
+ return Primitive::IsFloatingPointType(instr->GetType())
+ ? static_cast<vixl::CPURegister>(OutputFPRegister(instr))
+ : static_cast<vixl::CPURegister>(OutputRegister(instr));
}
static inline vixl::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
- return IsFPType(instr->InputAt(index)->GetType())
+ return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType())
? static_cast<vixl::CPURegister>(InputFPRegisterAt(instr, index))
: static_cast<vixl::CPURegister>(InputRegisterAt(instr, index));
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 6d10544..ba26afe 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -39,7 +39,6 @@ namespace arm64 {
using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
-using helpers::IsIntegralType;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
using helpers::WRegisterFrom;
@@ -74,7 +73,7 @@ static void MoveFromReturnRegister(Location trg,
DCHECK_NE(type, Primitive::kPrimVoid);
- if (IsIntegralType(type)) {
+ if (Primitive::IsIntegralType(type)) {
Register trg_reg = RegisterFrom(trg, type);
Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
__ Mov(trg_reg, res_reg, kDiscardForSameWReg);
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 50d171c..9dda144 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -148,6 +148,27 @@ class Primitive {
static const char* PrettyDescriptor(Type type);
+ static bool IsFloatingPointType(Type type) {
+ return type == kPrimFloat || type == kPrimDouble;
+ }
+
+ static bool IsIntegralType(Type type) {
+ switch (type) {
+ case kPrimByte:
+ case kPrimChar:
+ case kPrimShort:
+ case kPrimInt:
+ case kPrimLong:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool Is64BitType(Type type) {
+ return type == kPrimLong || type == kPrimDouble;
+ }
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Primitive);
};
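
One detail worth noting: IsFPType and Is64BitType were constexpr in common_arm64.h, while their replacements above are plain static member functions, so they can no longer be used in constant expressions. A hypothetical constexpr-preserving variant (an assumption for illustration, not part of this commit) would look like:

    // Hypothetical constexpr-preserving variant, NOT what this commit adds.
    class PrimitiveSketch {
     public:
      enum Type { kPrimLong, kPrimFloat, kPrimDouble };  // reduced enum for the sketch

      static constexpr bool IsFloatingPointType(Type type) {
        return type == kPrimFloat || type == kPrimDouble;
      }

      static constexpr bool Is64BitType(Type type) {
        return type == kPrimLong || type == kPrimDouble;
      }
    };

    // The constexpr variants remain usable in constant expressions:
    static_assert(PrimitiveSketch::Is64BitType(PrimitiveSketch::kPrimDouble),
                  "double is a 64-bit primitive type");
    static_assert(!PrimitiveSketch::IsFloatingPointType(PrimitiveSketch::kPrimLong),
                  "long is not a floating-point type");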