summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorAndreas Gampe <agampe@google.com>2015-02-13 19:23:55 -0800
committerAndreas Gampe <agampe@google.com>2015-02-18 16:50:22 -0800
commitab1eb0d1d047e3478ebb891e5259d2f1d1dd78bd (patch)
treea2d211ec81294adab2981d0179c8f04be3e8c8c4
parent6e27f82193a8f54cd8ecdc8fb2c4c1adadafbaf4 (diff)
downloadart-ab1eb0d1d047e3478ebb891e5259d2f1d1dd78bd.zip
art-ab1eb0d1d047e3478ebb891e5259d2f1d1dd78bd.tar.gz
art-ab1eb0d1d047e3478ebb891e5259d2f1d1dd78bd.tar.bz2
ART: Templatize IsInt & IsUint
Ensure that things are used correctly. Change-Id: I76f082b32dcee28bbfb4c519daa401ac595873b3
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc4
-rw-r--r--compiler/dex/quick/mips/utility_mips.cc9
-rw-r--r--compiler/utils/arm/assembler_arm.cc31
-rw-r--r--compiler/utils/arm/assembler_arm32.cc2
-rw-r--r--compiler/utils/arm/assembler_thumb2.cc4
-rw-r--r--compiler/utils/x86/assembler_x86.cc4
-rw-r--r--compiler/utils/x86/assembler_x86.h8
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc10
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h14
-rw-r--r--compiler/utils/x86_64/assembler_x86_64_test.cc35
-rw-r--r--runtime/class_linker.cc6
-rw-r--r--runtime/dex_file.cc6
-rw-r--r--runtime/utils.h66
13 files changed, 112 insertions, 87 deletions
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f7968c2..7e916be 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -238,7 +238,7 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
&field_offset, &is_volatile);
- if (fast_path && !is_volatile && IsUint(16, field_offset.Int32Value())) {
+ if (fast_path && !is_volatile && IsUint<16>(field_offset.Int32Value())) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< " to " << Instruction::Name(new_opcode)
<< " by replacing field index " << field_idx
@@ -274,7 +274,7 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
&target_method, &vtable_idx,
&direct_code, &direct_method);
if (fast_path && original_invoke_type == invoke_type) {
- if (vtable_idx >= 0 && IsUint(16, vtable_idx)) {
+ if (vtable_idx >= 0 && IsUint<16>(vtable_idx)) {
VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
<< "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
<< " to " << Instruction::Name(new_opcode)
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 6f6bf68..ec6edab 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,7 +56,8 @@ LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
- return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
+ // For encodings, see LoadConstantNoClobber below.
+ return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -96,9 +97,11 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
/* See if the value can be constructed cheaply */
if (value == 0) {
res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
- } else if ((value > 0) && (value <= 65535)) {
+ } else if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
res = NewLIR3(kMipsOri, r_dest.GetReg(), rZERO, value);
- } else if ((value < 0) && (value >= -32768)) {
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
res = NewLIR3(kMipsAddiu, r_dest.GetReg(), rZERO, value);
} else {
res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1f44f19..a52e6eb 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -166,7 +166,7 @@ uint32_t ShifterOperand::encodingThumb() const {
}
uint32_t Address::encodingArm() const {
- CHECK(IsAbsoluteUint(12, offset_));
+ CHECK(IsAbsoluteUint<12>(offset_));
uint32_t encoding;
if (is_immed_offset_) {
if (offset_ < 0) {
@@ -278,11 +278,12 @@ uint32_t Address::encoding3() const {
// Encoding for vfp load/store addressing.
uint32_t Address::vencoding() const {
+ CHECK(IsAbsoluteUint<10>(offset_)); // In the range -1020 to +1020.
+ CHECK_ALIGNED(offset_, 2); // Multiple of 4.
+
const uint32_t offset_mask = (1 << 12) - 1;
uint32_t encoding = encodingArm();
uint32_t offset = encoding & offset_mask;
- CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
- CHECK_ALIGNED(offset, 2); // Multiple of 4.
CHECK((am_ == Offset) || (am_ == NegOffset));
uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
if (am_ == Offset) {
@@ -298,13 +299,13 @@ bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
case kLoadSignedHalfword:
case kLoadUnsignedHalfword:
case kLoadWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -316,13 +317,13 @@ bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
switch (type) {
case kStoreHalfword:
case kStoreWordPair:
- return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ return IsAbsoluteUint<8>(offset); // Addressing mode 3.
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ return IsAbsoluteUint<12>(offset); // Addressing mode 2.
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -336,12 +337,12 @@ bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
case kLoadUnsignedHalfword:
case kLoadUnsignedByte:
case kLoadWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kLoadSWord:
case kLoadDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kLoadWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
@@ -354,12 +355,12 @@ bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
case kStoreHalfword:
case kStoreByte:
case kStoreWord:
- return IsAbsoluteUint(12, offset);
+ return IsAbsoluteUint<12>(offset);
case kStoreSWord:
case kStoreDWord:
- return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ return IsAbsoluteUint<10>(offset); // VFP addressing mode.
case kStoreWordPair:
- return IsAbsoluteUint(10, offset);
+ return IsAbsoluteUint<10>(offset);
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 8d1fb60..9579691 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1254,7 +1254,7 @@ void Arm32Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR
void Arm32Assembler::svc(uint32_t imm24) {
- CHECK(IsUint(24, imm24)) << imm24;
+ CHECK(IsUint<24>(imm24)) << imm24;
int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
Emit(encoding);
}
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 5383c28..6d0571e 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2080,7 +2080,7 @@ void Thumb2Assembler::vmstat(Condition cond) { // VMRS APSR_nzcv, FPSCR.
void Thumb2Assembler::svc(uint32_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B14 | B12 |
B11 | B10 | B9 | B8 |
imm8;
@@ -2089,7 +2089,7 @@ void Thumb2Assembler::svc(uint32_t imm8) {
void Thumb2Assembler::bkpt(uint16_t imm8) {
- CHECK(IsUint(8, imm8)) << imm8;
+ CHECK(IsUint<8>(imm8)) << imm8;
int16_t encoding = B15 | B13 | B12 |
B11 | B10 | B9 |
imm8;
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 03744e4..8f4208b 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1290,7 +1290,7 @@ void X86Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1325,7 +1325,7 @@ void X86Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 3a44ace..2dde907 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -35,10 +35,10 @@ class Immediate : public ValueObject {
int32_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
private:
const int32_t value_;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 556fa9b..f2704b7 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1515,7 +1515,7 @@ void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int32_t v32 = static_cast<int32_t>(imm.value());
- if (IsInt32(8, v32)) {
+ if (IsInt<8>(v32)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1555,7 +1555,7 @@ void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
// See whether imm can be represented as a sign-extended 8bit value.
int64_t v64 = imm.value();
- if (IsInt64(8, v64)) {
+ if (IsInt<8>(v64)) {
// Sign-extension works.
EmitUint8(0x6B);
EmitOperand(reg.LowBits(), Operand(reg));
@@ -1705,7 +1705,7 @@ void X86_64Assembler::notq(CpuRegister reg) {
void X86_64Assembler::enter(const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC8);
- CHECK(imm.is_uint16());
+ CHECK(imm.is_uint16()) << imm.value();
EmitUint8(imm.value() & 0xFF);
EmitUint8((imm.value() >> 8) & 0xFF);
EmitUint8(0x00);
@@ -1759,7 +1759,7 @@ void X86_64Assembler::j(Condition condition, Label* label) {
static const int kLongSize = 6;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0x70 + condition);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
@@ -1796,7 +1796,7 @@ void X86_64Assembler::jmp(Label* label) {
static const int kLongSize = 5;
int offset = label->Position() - buffer_.Size();
CHECK_LE(offset, 0);
- if (IsInt(8, offset - kShortSize)) {
+ if (IsInt<8>(offset - kShortSize)) {
EmitUint8(0xEB);
EmitUint8((offset - kShortSize) & 0xFF);
} else {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index a1c704e..5dfcf45 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -42,15 +42,11 @@ class Immediate : public ValueObject {
int64_t value() const { return value_; }
- bool is_int8() const { return IsInt(8, value_); }
- bool is_uint8() const { return IsUint(8, value_); }
- bool is_int16() const { return IsInt(16, value_); }
- bool is_uint16() const { return IsUint(16, value_); }
- bool is_int32() const {
- // This does not work on 32b machines: return IsInt(32, value_);
- int64_t limit = static_cast<int64_t>(1) << 31;
- return (-limit <= value_) && (value_ < limit);
- }
+ bool is_int8() const { return IsInt<8>(value_); }
+ bool is_uint8() const { return IsUint<8>(value_); }
+ bool is_int16() const { return IsInt<16>(value_); }
+ bool is_uint16() const { return IsUint<16>(value_); }
+ bool is_int32() const { return IsInt<32>(value_); }
private:
const int64_t value_;
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6df4144..00f508b 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -44,10 +44,10 @@ static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerfu
TEST(AssemblerX86_64, SignExtension) {
// 32bit.
for (int32_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::random_device rd;
@@ -55,54 +55,65 @@ TEST(AssemblerX86_64, SignExtension) {
std::uniform_int_distribution<int32_t> uniform_dist(256, INT32_MAX);
for (size_t i = 0; i < kRandomIterations; i++) {
int32_t value = uniform_dist(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int32_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt32(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt32(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int32_t> uniform_dist2(INT32_MIN, -256);
for (size_t i = 0; i < 100; i++) {
int32_t value = uniform_dist2(e1);
- EXPECT_FALSE(IsInt32(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// 64bit.
for (int64_t i = 0; i < 128; i++) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int32_t i = 128; i < 255; i++) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some higher ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist3(256, INT64_MAX);
for (size_t i = 0; i < 100; i++) {
int64_t value = uniform_dist3(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
// Negative ones.
for (int64_t i = -1; i >= -128; i--) {
- EXPECT_TRUE(IsInt64(8, i)) << i;
+ EXPECT_TRUE(IsInt<8>(i)) << i;
}
for (int64_t i = -129; i > -256; i--) {
- EXPECT_FALSE(IsInt64(8, i)) << i;
+ EXPECT_FALSE(IsInt<8>(i)) << i;
}
// Do some lower ones randomly.
std::uniform_int_distribution<int64_t> uniform_dist4(INT64_MIN, -256);
for (size_t i = 0; i < kRandomIterations; i++) {
int64_t value = uniform_dist4(e1);
- EXPECT_FALSE(IsInt64(8, value)) << value;
+ EXPECT_FALSE(IsInt<8>(value)) << value;
}
+
+ int64_t value = INT64_C(0x1200000010);
+ x86_64::Immediate imm(value);
+ EXPECT_FALSE(imm.is_int8());
+ EXPECT_FALSE(imm.is_int16());
+ EXPECT_FALSE(imm.is_int32());
+ value = INT64_C(0x8000000000000001);
+ x86_64::Immediate imm2(value);
+ EXPECT_FALSE(imm2.is_int8());
+ EXPECT_FALSE(imm2.is_int16());
+ EXPECT_FALSE(imm2.is_int32());
}
struct X86_64CpuRegisterCompare {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3592d2c..33456cf 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4812,7 +4812,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
- if (!IsUint(16, count)) {
+ if (!IsUint<16>(count)) {
ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
return false;
}
@@ -5033,7 +5033,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
local_method->SetMethodIndex(actual_count);
++actual_count;
}
- if (!IsUint(16, actual_count)) {
+ if (!IsUint<16>(actual_count)) {
ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
return false;
}
@@ -5049,7 +5049,7 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
klass->SetVTable(vtable.Get());
} else {
CHECK_EQ(klass.Get(), GetClassRoot(kJavaLangObject));
- if (!IsUint(16, num_virtual_methods)) {
+ if (!IsUint<16>(num_virtual_methods)) {
ThrowClassFormatError(klass.Get(), "Too many methods: %d",
static_cast<int>(num_virtual_methods));
return false;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 94d62db..19a4bd0 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1164,15 +1164,15 @@ void EncodedStaticFieldValueIterator::Next() {
break;
case kByte:
jval_.i = ReadSignedInt(ptr_, value_arg);
- CHECK(IsInt(8, jval_.i));
+ CHECK(IsInt<8>(jval_.i));
break;
case kShort:
jval_.i = ReadSignedInt(ptr_, value_arg);
- CHECK(IsInt(16, jval_.i));
+ CHECK(IsInt<16>(jval_.i));
break;
case kChar:
jval_.i = ReadUnsignedInt(ptr_, value_arg, false);
- CHECK(IsUint(16, jval_.i));
+ CHECK(IsUint<16>(jval_.i));
break;
case kInt:
jval_.i = ReadSignedInt(ptr_, value_arg);
diff --git a/runtime/utils.h b/runtime/utils.h
index 1c2576c..0fbc9df 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -22,6 +22,7 @@
#include <limits>
#include <memory>
#include <string>
+#include <type_traits>
#include <vector>
#include "arch/instruction_set.h"
@@ -115,32 +116,45 @@ static inline bool IsInt(int N, intptr_t value) {
return (-limit <= value) && (value < limit);
}
-static inline bool IsInt32(int N, int32_t value) {
- CHECK_LT(0, N);
- CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int32_t));
- int32_t limit = static_cast<int32_t>(1) << (N - 1);
- return (-limit <= value) && (value < limit);
-}
-
-static inline bool IsInt64(int N, int64_t value) {
- CHECK_LT(0, N);
- CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int64_t));
- int64_t limit = static_cast<int64_t>(1) << (N - 1);
- return (-limit <= value) && (value < limit);
-}
-
-static inline bool IsUint(int N, intptr_t value) {
- CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerIntPtrT);
- intptr_t limit = static_cast<intptr_t>(1) << N;
- return (0 <= value) && (value < limit);
-}
-
-static inline bool IsAbsoluteUint(int N, intptr_t value) {
- CHECK_LT(0, N);
- CHECK_LT(N, kBitsPerIntPtrT);
- if (value < 0) value = -value;
- return IsUint(N, value);
+template <typename T>
+static constexpr T GetIntLimit(size_t bits) {
+ return
+ DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
+ DCHECK_CONSTEXPR(bits < kBitsPerByte * sizeof(T), "kBits must be < max.", 0)
+ static_cast<T>(1) << (bits - 1);
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsInt(T value) {
+ static_assert(kBits > 0, "kBits cannot be zero.");
+ static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ static_assert(std::is_signed<T>::value, "Needs a signed type.");
+ // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is
+ // trivially true.
+ return (kBits == kBitsPerByte * sizeof(T)) ?
+ true :
+ (-GetIntLimit<T>(kBits) <= value) && (value < GetIntLimit<T>(kBits));
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsUint(T value) {
+ static_assert(kBits > 0, "kBits cannot be zero.");
+ static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ static_assert(std::is_integral<T>::value, "Needs an integral type.");
+ // Corner case for "use all bits." Can't use the limits, as they would overflow, but it is
+ // trivially true.
+ return (0 <= value) &&
+ (kBits == kBitsPerByte * sizeof(T) ||
+ (static_cast<typename std::make_unsigned<T>::type>(value) <=
+ GetIntLimit<typename std::make_unsigned<T>::type>(kBits + 1) - 1));
+}
+
+template <size_t kBits, typename T>
+static constexpr bool IsAbsoluteUint(T value) {
+  static_assert(kBits <= kBitsPerByte * sizeof(T), "kBits must be <= max.");
+ return (kBits == kBitsPerByte * sizeof(T)) ?
+ true :
+ IsUint<kBits, T>(value < 0 ? -value : value);
}
static inline uint16_t Low16Bits(uint32_t value) {