diff options
author | Mathieu Chartier <mathieuc@google.com> | 2014-07-14 10:16:05 -0700 |
---|---|---|
committer | Mathieu Chartier <mathieuc@google.com> | 2014-07-14 10:58:09 -0700 |
commit | fd22d5bada15d95b5ea8ab5a4dda39077e1a54ee (patch) | |
tree | 3dc5aaa74f1272c357d339c3c61d7e1ed0aececf /runtime/gc | |
parent | e8b8086388159be5fecb23ae6185e70f3dfb5da6 (diff) | |
download | art-fd22d5bada15d95b5ea8ab5a4dda39077e1a54ee.zip art-fd22d5bada15d95b5ea8ab5a4dda39077e1a54ee.tar.gz art-fd22d5bada15d95b5ea8ab5a4dda39077e1a54ee.tar.bz2 |
Fix infinite loop when calling SetStatus after OOM.
There was a problem where we would call SetStatus when we had an OOM
error. This resulted in attempting to find the ExceptionInInitializer
class, which, if not already loaded, performs more allocations,
resulting in an infinite loop.
Also some cleanup addressing other comments.
Bug: 16082350
Change-Id: I5c1e638a03ddf700ab4e9cad9a3077d2b1b26c43
Diffstat (limited to 'runtime/gc')
-rw-r--r-- | runtime/gc/accounting/card_table.cc | 2 | ||||
-rw-r--r-- | runtime/gc/heap-inl.h | 10 |
2 files changed, 6 insertions, 6 deletions
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc index a95c003..ceb42e5 100644 --- a/runtime/gc/accounting/card_table.cc +++ b/runtime/gc/accounting/card_table.cc @@ -83,8 +83,6 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) { CardTable::CardTable(MemMap* mem_map, byte* biased_begin, size_t offset) : mem_map_(mem_map), biased_begin_(biased_begin), offset_(offset) { - byte* __attribute__((unused)) begin = mem_map_->Begin() + offset_; - byte* __attribute__((unused)) end = mem_map_->End(); } void CardTable::ClearSpaceCards(space::ContinuousSpace* space) { diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 419af30..7d3fd2d 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -63,6 +63,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas // If we have a thread local allocation we don't need to update bytes allocated. if (allocator == kAllocatorTypeTLAB && byte_count <= self->TlabSize()) { obj = self->AllocTlab(byte_count); + DCHECK(obj != nullptr) << "AllocTlab can't fail"; obj->SetClass(klass); if (kUseBakerOrBrooksReadBarrier) { if (kUseBrooksReadBarrier) { @@ -71,7 +72,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas obj->AssertReadBarrierPointer(); } bytes_allocated = byte_count; - pre_fence_visitor(obj, bytes_allocated); + usable_size = bytes_allocated; + pre_fence_visitor(obj, usable_size); QuasiAtomic::ThreadFenceForConstructor(); } else { obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated, @@ -111,13 +113,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas WriteBarrierField(obj, mirror::Object::ClassOffset(), klass); } pre_fence_visitor(obj, usable_size); - if (kIsDebugBuild && Runtime::Current()->IsStarted()) { - CHECK_LE(obj->SizeOf(), usable_size); - } new_num_bytes_allocated = 
static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated)) + bytes_allocated; } + if (kIsDebugBuild && Runtime::Current()->IsStarted()) { + CHECK_LE(obj->SizeOf(), usable_size); + } // TODO: Deprecate. if (kInstrumented) { if (Runtime::Current()->HasStatsEnabled()) { |