 compiler/dex/quick/quick_cfi_test.cc           |   4
 compiler/image_writer.cc                       |   7
 compiler/optimizing/live_interval_test.cc      |   6
 compiler/optimizing/register_allocator.cc      |  39
 compiler/optimizing/register_allocator_test.cc |   9
 compiler/optimizing/ssa_builder.cc             |   6
 compiler/optimizing/ssa_liveness_analysis.cc   |  25
 compiler/optimizing/ssa_liveness_analysis.h    | 159
 compiler/optimizing/stack_map_stream.h         |   3
 runtime/check_jni.cc                           |  16
 runtime/gc/collector/mark_sweep.cc             |   9
 runtime/gc/heap.h                              |   2
 runtime/mirror/art_method-inl.h                |   2
 runtime/parsed_options.cc                      |   4
 runtime/runtime_options.def                    |   2
 15 files changed, 154 insertions(+), 139 deletions(-)
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 2db5a36..d276457 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -89,13 +89,13 @@ class QuickCFITest : public CFITest {
m2l->CompilerInitializeRegAlloc();
for (const auto& info : m2l->reg_pool_->core_regs_) {
if (m2l->num_core_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
- m2l->core_spill_mask_ |= 1 << info->GetReg().GetReg();
+ m2l->core_spill_mask_ |= 1 << info->GetReg().GetRegNum();
m2l->num_core_spills_++;
}
}
for (const auto& info : m2l->reg_pool_->sp_regs_) {
if (m2l->num_fp_spills_ < 2 && !info->IsTemp() && !info->InUse()) {
- m2l->fp_spill_mask_ |= 1 << info->GetReg().GetReg();
+ m2l->fp_spill_mask_ |= 1 << info->GetReg().GetRegNum();
m2l->num_fp_spills_++;
}
}
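The fix above matters because core_spill_mask_ and fp_spill_mask_ are indexed by register number, while GetReg() returns the full RegStorage encoding (kind/shape bits included), pushing the shift far past the mask width. A minimal sketch of the difference, using a simplified stand-in for ART's RegStorage:

    #include <cassert>
    #include <cstdint>

    struct RegStorage {
      // Assumed layout for illustration only: low byte holds the register
      // number, higher bits hold kind/shape flags.
      uint16_t raw;
      int GetReg() const { return raw; }            // full encoding
      int GetRegNum() const { return raw & 0xff; }  // register number only
    };

    int main() {
      RegStorage r5{0x0105};  // a kind bit set, register number 5
      uint32_t spill_mask = 0;
      // 1 << r5.GetReg() would shift by 261 -- undefined behaviour on a
      // 32-bit mask. GetRegNum() sets the intended bit.
      spill_mask |= 1u << r5.GetRegNum();
      assert(spill_mask == (1u << 5));
    }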
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 2420254..670c897 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -70,6 +70,7 @@ namespace art {
// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;
+static constexpr bool kComputeEagerResolvedStrings = false;
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -645,7 +646,11 @@ void ImageWriter::ProcessStrings() {
LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
<< num_chars << " prefix saved chars=" << prefix_saved_chars;
}
- ComputeEagerResolvedStrings();
+ // Calling this can in theory fill in some resolved strings. However, in practice it seems to
+ // never resolve any.
+ if (kComputeEagerResolvedStrings) {
+ ComputeEagerResolvedStrings();
+ }
}
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index 28000c1..405f261 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -84,13 +84,13 @@ TEST(LiveInterval, Covers) {
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ ASSERT_FALSE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(11));
- ASSERT_TRUE(interval->Covers(14));
- ASSERT_TRUE(interval->Covers(15));
- ASSERT_FALSE(interval->Covers(0));
ASSERT_FALSE(interval->Covers(12));
ASSERT_FALSE(interval->Covers(13));
+ ASSERT_TRUE(interval->Covers(14));
+ ASSERT_TRUE(interval->Covers(15));
ASSERT_FALSE(interval->Covers(16));
}
}
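The reordering above groups the assertions by range and makes the half-open semantics easier to read: a LiveRange covers [start, end), so with ranges {4,12} and {14,16}, positions 12 and 13 fall into the lifetime hole and 16 is past the end. A standalone toy version of Covers over half-open ranges (not ART's classes):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Range { size_t start, end; };  // half-open: [start, end)

    bool Covers(const std::vector<Range>& ranges, size_t position) {
      for (const Range& r : ranges) {
        if (r.start <= position && position < r.end) return true;
      }
      return false;
    }

    int main() {
      const std::vector<Range> ranges = {{4, 12}, {14, 16}};
      assert(!Covers(ranges, 0));
      assert(Covers(ranges, 4) && Covers(ranges, 11));
      assert(!Covers(ranges, 12) && !Covers(ranges, 13));
      assert(Covers(ranges, 14) && Covers(ranges, 15));
      assert(!Covers(ranges, 16));
    }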
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a02b1da..6350b35 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -332,6 +332,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
}
current->AddSafepoint(safepoint);
}
+ current->ResetSearchCache();
// Some instructions define their output in fixed register/stack slot. We need
// to ensure we know these locations before doing register allocation. For a
@@ -666,6 +667,7 @@ static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* fr
DCHECK(!interval->IsHighInterval());
// Note that the same instruction may occur multiple times in the input list,
// so `free_until` may have changed already.
+ // Since `position` is not the current scan position, we need to use CoversSlow.
if (interval->IsDeadAt(position)) {
// Set the register to be free. Note that inactive intervals might later
// update this.
@@ -674,12 +676,12 @@ static void FreeIfNotCoverAt(LiveInterval* interval, size_t position, size_t* fr
DCHECK(interval->GetHighInterval()->IsDeadAt(position));
free_until[interval->GetHighInterval()->GetRegister()] = kMaxLifetimePosition;
}
- } else if (!interval->Covers(position)) {
+ } else if (!interval->CoversSlow(position)) {
// The interval becomes inactive at `defined_by`. We make its register
// available only until the next use strictly after `defined_by`.
free_until[interval->GetRegister()] = interval->FirstUseAfter(position);
if (interval->HasHighInterval()) {
- DCHECK(!interval->GetHighInterval()->Covers(position));
+ DCHECK(!interval->GetHighInterval()->CoversSlow(position));
free_until[interval->GetHighInterval()->GetRegister()] = free_until[interval->GetRegister()];
}
}
@@ -720,7 +722,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
// the linear scan algorithm. So we use `defined_by`'s end lifetime
// position to check whether the input is dead or is inactive after
// `defined_by`.
- DCHECK(interval->Covers(defined_by->GetLifetimePosition()));
+ DCHECK(interval->CoversSlow(defined_by->GetLifetimePosition()));
size_t position = defined_by->GetLifetimePosition() + 1;
FreeIfNotCoverAt(interval, position, free_until);
}
@@ -1438,7 +1440,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
use = use->GetNext();
}
while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
- DCHECK(current->Covers(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
+ DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
LocationSummary* locations = use->GetUser()->GetLocations();
if (use->GetIsEnvironment()) {
locations->SetEnvironmentAt(use->GetInputIndex(), source);
@@ -1475,7 +1477,7 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
safepoint_position != nullptr;
safepoint_position = safepoint_position->GetNext()) {
- DCHECK(current->Covers(safepoint_position->GetPosition()));
+ DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
LocationSummary* locations = safepoint_position->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
@@ -1538,35 +1540,14 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
return;
}
- // Intervals end at the lifetime end of a block. The decrement by one
- // ensures the `Cover` call will return true.
- size_t from_position = from->GetLifetimeEnd() - 1;
- size_t to_position = to->GetLifetimeStart();
-
- LiveInterval* destination = nullptr;
- LiveInterval* source = nullptr;
-
- LiveInterval* current = interval;
-
- // Check the intervals that cover `from` and `to`.
- while ((current != nullptr) && (source == nullptr || destination == nullptr)) {
- if (current->Covers(from_position)) {
- DCHECK(source == nullptr);
- source = current;
- }
- if (current->Covers(to_position)) {
- DCHECK(destination == nullptr);
- destination = current;
- }
-
- current = current->GetNextSibling();
- }
+ // Find the intervals that cover `from` and `to`.
+ LiveInterval* destination = interval->GetSiblingAt(to->GetLifetimeStart());
+ LiveInterval* source = interval->GetSiblingAt(from->GetLifetimeEnd() - 1);
if (destination == source) {
// Interval was not split.
return;
}
-
DCHECK(destination != nullptr && source != nullptr);
if (!destination->HasRegister()) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index c307d98..182cd0e 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -433,18 +433,15 @@ TEST(RegisterAllocatorTest, FreeUntil) {
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
- interval->SetRegister(0);
+ interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5c3d9bf..7a252af 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -186,11 +186,9 @@ void SsaBuilder::FixNullConstantType() {
HInstruction* right = equality_instr->InputAt(1);
HInstruction* null_instr = nullptr;
- if ((left->GetType() == Primitive::kPrimNot)
- && (right->IsNullConstant() || right->IsIntConstant())) {
+ if ((left->GetType() == Primitive::kPrimNot) && right->IsIntConstant()) {
null_instr = right;
- } else if ((right->GetType() == Primitive::kPrimNot)
- && (left->IsNullConstant() || left->IsIntConstant())) {
+ } else if ((right->GetType() == Primitive::kPrimNot) && left->IsIntConstant()) {
null_instr = left;
} else {
continue;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 302df2a..ea0e7c3 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -402,11 +402,11 @@ int LiveInterval::FindHintAtDefinition() const {
for (size_t i = 0, e = defined_by_->InputCount(); i < e; ++i) {
HInstruction* input = defined_by_->InputAt(i);
size_t end = predecessors.Get(i)->GetLifetimeEnd();
- const LiveInterval& input_interval = input->GetLiveInterval()->GetIntervalAt(end - 1);
- if (input_interval.GetEnd() == end) {
+ LiveInterval* input_interval = input->GetLiveInterval()->GetSiblingAt(end - 1);
+ if (input_interval->GetEnd() == end) {
// If the input dies at the end of the predecessor, we know its register can
// be reused.
- Location input_location = input_interval.ToLocation();
+ Location input_location = input_interval->ToLocation();
if (input_location.IsRegisterKind()) {
DCHECK(SameRegisterKind(input_location));
return RegisterOrLowRegister(input_location);
@@ -418,12 +418,12 @@ int LiveInterval::FindHintAtDefinition() const {
Location out = locations->Out();
if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
// Try to use the same register as the first input.
- const LiveInterval& input_interval =
- GetDefinedBy()->InputAt(0)->GetLiveInterval()->GetIntervalAt(GetStart() - 1);
- if (input_interval.GetEnd() == GetStart()) {
+ LiveInterval* input_interval =
+ GetDefinedBy()->InputAt(0)->GetLiveInterval()->GetSiblingAt(GetStart() - 1);
+ if (input_interval->GetEnd() == GetStart()) {
// If the input dies at the start of this instruction, we know its register can
// be reused.
- Location location = input_interval.ToLocation();
+ Location location = input_interval->ToLocation();
if (location.IsRegisterKind()) {
DCHECK(SameRegisterKind(location));
return RegisterOrLowRegister(location);
@@ -487,16 +487,17 @@ Location LiveInterval::ToLocation() const {
}
Location LiveInterval::GetLocationAt(size_t position) {
- return GetIntervalAt(position).ToLocation();
+ LiveInterval* sibling = GetSiblingAt(position);
+ DCHECK(sibling != nullptr);
+ return sibling->ToLocation();
}
-const LiveInterval& LiveInterval::GetIntervalAt(size_t position) {
+LiveInterval* LiveInterval::GetSiblingAt(size_t position) {
LiveInterval* current = this;
- while (!current->Covers(position)) {
+ while (current != nullptr && !current->IsDefinedAt(position)) {
current = current->GetNextSibling();
- DCHECK(current != nullptr);
}
- return *current;
+ return current;
}
} // namespace art
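GetSiblingAt replaces GetIntervalAt and, unlike its predecessor, tolerates positions where no sibling is defined by returning nullptr instead of asserting. A sketch of the walk with a simplified interval type (not ART's LiveInterval):

    #include <cstddef>

    struct Interval {
      size_t start, end;       // overall extent of this sibling: [start, end)
      Interval* next_sibling;  // next piece produced by splitting
      bool IsDefinedAt(size_t position) const {
        return start <= position && position < end;
      }
    };

    // Walks the chain of split siblings; returns nullptr when `position`
    // lies outside every sibling, mirroring the new GetSiblingAt contract.
    Interval* GetSiblingAt(Interval* interval, size_t position) {
      while (interval != nullptr && !interval->IsDefinedAt(position)) {
        interval = interval->next_sibling;
      }
      return interval;
    }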
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 98f98a2..8eb98a1 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -274,8 +274,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
if (first_range_ == nullptr) {
// First time we see a use of that interval.
- first_range_ = last_range_ = new (allocator_) LiveRange(
- start_block_position, position, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start_block_position, position, nullptr);
} else if (first_range_->GetStart() == start_block_position) {
// There is a use later in the same block or in a following block.
// Note that in such a case, `AddRange` for the whole blocks has been called
@@ -290,7 +290,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// predecessor/successor. When two blocks are predecessor/successor, the
// liveness algorithm has called `AddRange` before arriving in this method,
// and the check line 205 would succeed.
- first_range_ = new (allocator_) LiveRange(start_block_position, position, first_range_);
+ first_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start_block_position, position, first_range_);
}
}
@@ -302,7 +303,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
void AddRange(size_t start, size_t end) {
if (first_range_ == nullptr) {
- first_range_ = last_range_ = new (allocator_) LiveRange(start, end, first_range_);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, first_range_);
} else if (first_range_->GetStart() == end) {
// There is a use in the following block.
first_range_->start_ = start;
@@ -311,7 +313,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
} else {
DCHECK_GT(first_range_->GetStart(), end);
// There is a hole in the interval. Create a new range.
- first_range_ = new (allocator_) LiveRange(start, end, first_range_);
+ first_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, first_range_);
}
}
@@ -328,15 +330,15 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
- first_range_ = after_loop;
+ first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
first_range_->start_ = start;
} else {
// The use after the loop is after a lifetime hole.
DCHECK(last_in_loop != nullptr);
- first_range_ = last_in_loop;
+ first_range_ = range_search_start_ = last_in_loop;
first_range_->start_ = start;
first_range_->end_ = end;
}
@@ -357,7 +359,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Instruction without uses.
DCHECK(!defined_by_->HasNonEnvironmentUses());
DCHECK(from == defined_by_->GetLifetimePosition());
- first_range_ = last_range_ = new (allocator_) LiveRange(from, from + 2, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(from, from + 2, nullptr);
}
}
@@ -372,21 +375,43 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
bool HasRegister() const { return register_ != kNoRegister; }
bool IsDeadAt(size_t position) const {
- return last_range_->GetEnd() <= position;
+ return GetEnd() <= position;
}
- bool Covers(size_t position) {
- return !IsDeadAt(position) && FindRangeAt(position) != nullptr;
+ bool IsDefinedAt(size_t position) const {
+ return GetStart() <= position && !IsDeadAt(position);
}
- /**
- * Returns the first intersection of this interval with `other`.
- */
- size_t FirstIntersectionWith(LiveInterval* other) const {
+ // Returns true if the interval contains a LiveRange covering `position`.
+ // The range at or immediately after the current position of linear scan
+ // is cached for better performance. If `position` can be smaller than
+ // that, CoversSlow should be used instead.
+ bool Covers(size_t position) {
+ LiveRange* candidate = FindRangeAtOrAfter(position, range_search_start_);
+ range_search_start_ = candidate;
+ return (candidate != nullptr && candidate->GetStart() <= position);
+ }
+
+ // Same as Covers but always tests all ranges.
+ bool CoversSlow(size_t position) const {
+ LiveRange* candidate = FindRangeAtOrAfter(position, first_range_);
+ return candidate != nullptr && candidate->GetStart() <= position;
+ }
+
+ // Returns the first intersection of this interval with `current`, which
+ // must be the interval currently being allocated by linear scan.
+ size_t FirstIntersectionWith(LiveInterval* current) const {
+ // Find the first range at or after the start of `current`. We use the search
+ // cache to improve performance.
+ DCHECK(GetStart() <= current->GetStart() || IsFixed());
+ LiveRange* other_range = current->first_range_;
+ LiveRange* my_range = FindRangeAtOrAfter(other_range->GetStart(), range_search_start_);
+ if (my_range == nullptr) {
+ return kNoLifetime;
+ }
+
// Advance both intervals and find the first matching range start in
// this interval.
- LiveRange* my_range = first_range_;
- LiveRange* other_range = other->first_range_;
do {
if (my_range->IsBefore(*other_range)) {
my_range = my_range->GetNext();
@@ -513,7 +538,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
DCHECK(!is_fixed_);
DCHECK_GT(position, GetStart());
- if (last_range_->GetEnd() <= position) {
+ if (GetEnd() <= position) {
// This range dies before `position`, no need to split.
return nullptr;
}
@@ -537,7 +562,6 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
new_interval->parent_ = parent_;
new_interval->first_use_ = first_use_;
- last_visited_range_ = nullptr;
LiveRange* current = first_range_;
LiveRange* previous = nullptr;
// Iterate over the ranges, and either find a range that covers this position, or
@@ -557,6 +581,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
last_range_ = previous;
previous->next_ = nullptr;
new_interval->first_range_ = current;
+ if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
+ // Search start point is inside `new_interval`. Change it to nullptr
+ // (i.e. the end of the interval) in the original interval.
+ range_search_start_ = nullptr;
+ }
+ new_interval->range_search_start_ = new_interval->first_range_;
return new_interval;
} else {
// This range covers position. We create a new last_range_ for this interval
@@ -572,6 +602,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
}
new_interval->first_range_ = current;
current->start_ = position;
+ if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
+ // Search start point is inside `new_interval`. Change it to `last_range_`
+ // in the original interval. This is conservative but always correct.
+ range_search_start_ = last_range_;
+ }
+ new_interval->range_search_start_ = new_interval->first_range_;
return new_interval;
}
} while (current != nullptr);
@@ -643,8 +679,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Returns the location of the interval following its siblings at `position`.
Location GetLocationAt(size_t position);
- // Finds the interval that covers `position`.
- const LiveInterval& GetIntervalAt(size_t position);
+ // Finds the sibling that is defined at `position`.
+ LiveInterval* GetSiblingAt(size_t position);
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
@@ -697,7 +733,8 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
high_or_low_interval_->high_or_low_interval_ = this;
if (first_range_ != nullptr) {
high_or_low_interval_->first_range_ = first_range_->Dup(allocator_);
- high_or_low_interval_->last_range_ = first_range_->GetLastRange();
+ high_or_low_interval_->last_range_ = high_or_low_interval_->first_range_->GetLastRange();
+ high_or_low_interval_->range_search_start_ = high_or_low_interval_->first_range_;
}
if (first_use_ != nullptr) {
high_or_low_interval_->first_use_ = first_use_->Dup(allocator_);
@@ -707,12 +744,14 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// Returns whether an interval, when it is non-split, is using
// the same register of one of its input.
bool IsUsingInputRegister() const {
+ CHECK(kIsDebugBuild) << "Function should be used only for DCHECKs";
if (defined_by_ != nullptr && !IsSplit()) {
for (HInputIterator it(defined_by_); !it.Done(); it.Advance()) {
LiveInterval* interval = it.Current()->GetLiveInterval();
- // Find the interval that covers `defined_by`_.
- while (interval != nullptr && !interval->Covers(defined_by_->GetLifetimePosition())) {
+ // Find the interval that covers `defined_by_`. Calls to this function
+ // are made outside the linear scan, hence we need to use CoversSlow.
+ while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
@@ -731,6 +770,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// the same register of one of its input. Note that this method requires
// IsUsingInputRegister() to be true.
bool CanUseInputRegister() const {
+ CHECK(kIsDebugBuild) << "Function should be used only for DCHECKs";
DCHECK(IsUsingInputRegister());
if (defined_by_ != nullptr && !IsSplit()) {
LocationSummary* locations = defined_by_->GetLocations();
@@ -740,8 +780,9 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
for (HInputIterator it(defined_by_); !it.Done(); it.Advance()) {
LiveInterval* interval = it.Current()->GetLiveInterval();
- // Find the interval that covers `defined_by`_.
- while (interval != nullptr && !interval->Covers(defined_by_->GetLifetimePosition())) {
+ // Find the interval that covers `defined_by_`. Calls to this function
+ // are made outside the linear scan, hence we need to use CoversSlow.
+ while (interval != nullptr && !interval->CoversSlow(defined_by_->GetLifetimePosition())) {
interval = interval->GetNextSibling();
}
@@ -750,7 +791,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
&& interval->GetRegister() == GetRegister()) {
// We found the input that has the same register. Check if it is live after
// `defined_by`_.
- return !interval->Covers(defined_by_->GetLifetimePosition() + 1);
+ return !interval->CoversSlow(defined_by_->GetLifetimePosition() + 1);
}
}
}
@@ -773,6 +814,12 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
return first_safepoint_;
}
+ // Resets the starting point for range-searching queries to the first range.
+ // Intervals must be reset prior to starting a new linear scan over them.
+ void ResetSearchCache() {
+ range_search_start_ = first_range_;
+ }
+
private:
LiveInterval(ArenaAllocator* allocator,
Primitive::Type type,
@@ -785,9 +832,9 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
+ range_search_start_(nullptr),
first_safepoint_(nullptr),
last_safepoint_(nullptr),
- last_visited_range_(nullptr),
first_use_(nullptr),
type_(type),
next_sibling_(nullptr),
@@ -801,39 +848,29 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
high_or_low_interval_(nullptr),
defined_by_(defined_by) {}
- // Returns a LiveRange covering the given position or nullptr if no such range
- // exists in the interval.
- // This is a linear search optimized for multiple queries in a non-decreasing
- // position order typical for linear scan register allocation.
- LiveRange* FindRangeAt(size_t position) {
- // Make sure operations on the interval didn't leave us with a cached result
- // from a sibling.
+ // Searches for a LiveRange that either covers the given position or is
+ // the first LiveRange starting after it. Returns nullptr if no such range
+ // exists. Ranges known to end before `position` can be skipped via `search_start`.
+ LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
- if (last_visited_range_ != nullptr) {
- DCHECK_GE(last_visited_range_->GetStart(), GetStart());
- DCHECK_LE(last_visited_range_->GetEnd(), GetEnd());
+ if (search_start != first_range_) {
+ // If we are not searching the entire list of ranges, make sure we do
+ // not skip the range we are searching for.
+ if (search_start == nullptr) {
+ DCHECK(IsDeadAt(position));
+ } else if (search_start->GetStart() > position) {
+ DCHECK_EQ(search_start, FindRangeAtOrAfter(position, first_range_));
+ }
}
}
- // If this method was called earlier on a lower position, use that result as
- // a starting point to save time. However, linear scan performs 3 scans:
- // integers, floats, and resolution. Instead of resetting at the beginning
- // of a scan, we do it here.
- LiveRange* current;
- if (last_visited_range_ != nullptr && position >= last_visited_range_->GetStart()) {
- current = last_visited_range_;
- } else {
- current = first_range_;
- }
- while (current != nullptr && current->GetEnd() <= position) {
- current = current->GetNext();
- }
- last_visited_range_ = current;
- if (current != nullptr && position >= current->GetStart()) {
- return current;
- } else {
- return nullptr;
+ LiveRange* range;
+ for (range = search_start;
+ range != nullptr && range->GetEnd() <= position;
+ range = range->GetNext()) {
+ continue;
}
+ return range;
}
ArenaAllocator* const allocator_;
@@ -843,14 +880,14 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
LiveRange* first_range_;
LiveRange* last_range_;
+ // The first range at or after the current position of a linear scan. It is
+ // used to optimize range-searching queries.
+ LiveRange* range_search_start_;
+
// Safepoints where this interval is live.
SafepointPosition* first_safepoint_;
SafepointPosition* last_safepoint_;
- // Last visited range. This is a range search optimization leveraging the fact
- // that the register allocator does a linear scan through the intervals.
- LiveRange* last_visited_range_;
-
// Uses of this interval. Note that this linked list is shared amongst siblings.
UsePosition* first_use_;
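Taken together, the header changes replace the last_visited_range_ cache with range_search_start_, a pointer that only ever moves forward and is reset (via ResetSearchCache) before each scan. A minimal model of the fast/slow pair, with simplified types rather than ART's:

    #include <cstddef>

    struct LiveRange {
      size_t start, end;  // half-open [start, end)
      LiveRange* next;    // ranges are sorted and non-overlapping
    };

    class Interval {
     public:
      explicit Interval(LiveRange* first)
          : first_range_(first), range_search_start_(first) {}

      // Fast path: resumes from the cached range. Correct only while
      // queried positions are non-decreasing (the linear-scan invariant).
      bool Covers(size_t position) {
        LiveRange* candidate = FindRangeAtOrAfter(position, range_search_start_);
        range_search_start_ = candidate;
        return candidate != nullptr && candidate->start <= position;
      }

      // Slow path: scans from the head; safe for arbitrary positions.
      bool CoversSlow(size_t position) const {
        LiveRange* candidate = FindRangeAtOrAfter(position, first_range_);
        return candidate != nullptr && candidate->start <= position;
      }

      void ResetSearchCache() { range_search_start_ = first_range_; }

     private:
      static LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) {
        LiveRange* range = search_start;
        while (range != nullptr && range->end <= position) {
          range = range->next;
        }
        return range;
      }

      LiveRange* first_range_;
      LiveRange* range_search_start_;
    };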
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index a73c8d7..9a9e068 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -386,7 +386,8 @@ class StackMapStream : public ValueObject {
}
entry.live_dex_registers_mask->SetBit(dex_register);
- entry.dex_register_map_hash += (1 << dex_register);
+ entry.dex_register_map_hash +=
+ (1 << (dex_register % (sizeof(entry.dex_register_map_hash) * kBitsPerByte)));
entry.dex_register_map_hash += static_cast<uint32_t>(value);
entry.dex_register_map_hash += static_cast<uint32_t>(kind);
stack_maps_.Put(stack_maps_.Size() - 1, entry);
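The hash change fixes latent undefined behaviour: dex_register is not bounded by 32, and shifting a 32-bit value by its bit width or more is UB in C++. Reducing the shift modulo the hash's width keeps every update defined, at some cost to hash quality for high register numbers. A standalone sketch of the fixed update, with kBitsPerByte assumed to match ART's constant:

    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kBitsPerByte = 8;

    uint32_t MixDexRegisterIntoHash(uint32_t hash, size_t dex_register) {
      // 1 << dex_register would be undefined for dex_register >= 32.
      hash += 1u << (dex_register % (sizeof(hash) * kBitsPerByte));
      return hash;
    }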
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index f94ebea..c6940d3 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -742,7 +742,6 @@ class ScopedCheck {
bool CheckNonHeapValue(char fmt, JniValueType arg) {
switch (fmt) {
- case '.': // ...
case 'p': // TODO: pointer - null or readable?
case 'v': // JavaVM*
case 'B': // jbyte
@@ -937,9 +936,6 @@ class ScopedCheck {
break;
}
break;
- case '.':
- *msg += "...";
- break;
default:
LOG(FATAL) << function_name_ << ": unknown trace format specifier: '" << fmt << "'";
}
@@ -1635,7 +1631,7 @@ class CheckJNI {
static jint ThrowNew(JNIEnv* env, jclass c, const char* message) {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
- JniValueType args[5] = {{.E = env}, {.c = c}, {.u = message}};
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.u = message}};
if (sc.Check(soa, true, "Ecu", args) && sc.CheckThrowableClass(soa, c)) {
JniValueType result;
result.i = baseEnv(env)->ThrowNew(env, c, message);
@@ -1811,7 +1807,7 @@ class CheckJNI {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, __FUNCTION__);
JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ if (sc.Check(soa, true, "Ecm", args) && sc.CheckInstantiableNonArray(soa, c) &&
sc.CheckConstructor(soa, mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
@@ -1834,7 +1830,7 @@ class CheckJNI {
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, __FUNCTION__);
JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ if (sc.Check(soa, true, "Ecm", args) && sc.CheckInstantiableNonArray(soa, c) &&
sc.CheckConstructor(soa, mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
@@ -2669,18 +2665,18 @@ class CheckJNI {
case kVirtual: {
DCHECK(c == nullptr);
JniValueType args[3] = {{.E = env}, {.L = obj}, {.m = mid}};
- checked = sc.Check(soa, true, "ELm.", args);
+ checked = sc.Check(soa, true, "ELm", args);
break;
}
case kDirect: {
JniValueType args[4] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}};
- checked = sc.Check(soa, true, "ELcm.", args);
+ checked = sc.Check(soa, true, "ELcm", args);
break;
}
case kStatic: {
DCHECK(obj == nullptr);
JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
- checked = sc.Check(soa, true, "Ecm.", args);
+ checked = sc.Check(soa, true, "Ecm", args);
break;
}
default:
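Dropping the '.' specifier also fixes the over-sized args[5] in ThrowNew: with three initializers the trailing union elements were merely zero-initialized, so the old code worked, but args[3] matches the "Ecu" format and documents intent. A small sketch of that aggregate-initialization behaviour with a simplified value union:

    #include <cassert>

    union Value { int i; const char* u; };

    int main() {
      Value args5[5] = {{.i = 1}, {.i = 2}, {.i = 3}};  // [3], [4] zeroed
      assert(args5[3].i == 0 && args5[4].i == 0);
      Value args3[3] = {{.i = 1}, {.i = 2}, {.i = 3}};  // exact fit
      (void)args3;
    }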
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index bb8d876..14eb80b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -16,6 +16,7 @@
#include "mark_sweep.h"
+#include <atomic>
#include <functional>
#include <numeric>
#include <climits>
@@ -600,7 +601,7 @@ class MarkStackTask : public Task {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
- android_memory_barrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
if (reinterpret_cast<uintptr_t>(ref) >=
static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
return;
@@ -740,11 +741,7 @@ size_t MarkSweep::GetThreadCount(bool paused) const {
if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
return 1;
}
- if (paused) {
- return heap_->GetParallelGCThreadCount() + 1;
- } else {
- return heap_->GetConcGCThreadCount() + 1;
- }
+ return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}
void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
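android_memory_barrier() was a platform-specific primitive; std::atomic_thread_fence provides the same full-barrier effect portably. Roughly how the fence pairs with the finger check, sketched outside ART's types:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uintptr_t> atomic_finger{0};

    bool ShouldSkipPush(std::uintptr_t marked_addr) {
      // Ensure the preceding mark is visible before consulting the finger,
      // matching the seq_cst fence inserted in the diff.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return marked_addr >= atomic_finger.load(std::memory_order_relaxed);
    }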
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2f62798..066b4c5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -145,6 +145,8 @@ class Heap {
static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
// Primitive arrays larger than this size are put in the large object space.
static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+ // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
+ static constexpr bool kDefaultEnableParallelGC = false;
// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index fb427dc..a300d52 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -21,7 +21,7 @@
#include "art_field.h"
#include "class.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "dex_cache.h"
#include "dex_file.h"
#include "dex_file-inl.h"
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 0758b27..620a4bd 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -442,8 +442,8 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
}
// Default to number of processors minus one since the main GC thread also does work.
- args.SetIfMissing(M::ParallelGCThreads,
- static_cast<unsigned int>(sysconf(_SC_NPROCESSORS_CONF) - 1u));
+ args.SetIfMissing(M::ParallelGCThreads, gc::Heap::kDefaultEnableParallelGC ?
+ static_cast<unsigned int>(sysconf(_SC_NPROCESSORS_CONF) - 1u) : 0u);
// -Xverbose:
{
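With kDefaultEnableParallelGC false, ParallelGCThreads now defaults to 0 and heap code that checks the thread count can skip creating the pool entirely. The defaulting logic in isolation, assuming only POSIX sysconf:

    #include <unistd.h>

    static constexpr bool kDefaultEnableParallelGC = false;

    unsigned int DefaultParallelGCThreads() {
      // Processors minus one, since the main GC thread also does work.
      return kDefaultEnableParallelGC
          ? static_cast<unsigned int>(sysconf(_SC_NPROCESSORS_CONF) - 1)
          : 0u;
    }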
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index eff787a..8b504c1 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
RUNTIME_OPTIONS_KEY (MemoryKiB, HeapMaxFree, gc::Heap::kDefaultMaxFree)
RUNTIME_OPTIONS_KEY (MemoryKiB, NonMovingSpaceCapacity, gc::Heap::kDefaultNonMovingSpaceCapacity)
RUNTIME_OPTIONS_KEY (double, HeapTargetUtilization, gc::Heap::kDefaultTargetUtilization)
RUNTIME_OPTIONS_KEY (double, ForegroundHeapGrowthMultiplier, gc::Heap::kDefaultHeapGrowthMultiplier)
-RUNTIME_OPTIONS_KEY (unsigned int, ParallelGCThreads, 1u)
+RUNTIME_OPTIONS_KEY (unsigned int, ParallelGCThreads, 0u)
RUNTIME_OPTIONS_KEY (unsigned int, ConcGCThreads)
RUNTIME_OPTIONS_KEY (Memory<1>, StackSize) // -Xss
RUNTIME_OPTIONS_KEY (unsigned int, MaxSpinsBeforeThinLockInflation,Monitor::kDefaultMaxSpinsBeforeThinLockInflation)