author     Mathieu Chartier <mathieuc@google.com>  2014-03-26 12:53:19 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2014-03-26 16:15:19 -0700
commit     0767c9a85a97e0e16c39a63c31de6c543304b0a4 (patch)
tree       6a809f72c85ef249cd62172a7d0b14827609f8e8 /runtime/gc
parent     223efbe5164f6fe83cf04e7f9121adb29b8dd231 (diff)
Add valgrind support to large object map space.

Added valgrind support to large object map space.

Bug: 7392044
Change-Id: I1456f46414e1fa59ebcc2190ec00576dae26d623
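The approach, visible in the diff that follows: pad each large-object allocation with a red zone of kValgrindRedZoneBytes (one page) on either side, mark both red zones NOACCESS, and hand callers a pointer to the interior. Any read or write that under- or over-runs the object then lands in poisoned memory and Valgrind reports it. A sketch of the resulting layout (inferred from the Alloc() change below, not part of the commit):

    +--------------------+------------------------------+--------------------+
    | red zone (1 page)  | object payload (num_bytes)   | red zone (1 page)  |
    | NOACCESS           | returned to the caller       | NOACCESS           |
    +--------------------+------------------------------+--------------------+
    ^ start of mapping    ^ object_without_rdz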
Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/heap.cc                       2
-rw-r--r--  runtime/gc/space/large_object_space.cc  50
-rw-r--r--  runtime/gc/space/large_object_space.h    8
3 files changed, 55 insertions, 5 deletions
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7827261..02e7e3f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -708,9 +708,11 @@ Heap::~Heap() {
   allocation_stack_->Reset();
   live_stack_->Reset();
   STLDeleteValues(&mod_union_tables_);
+  STLDeleteValues(&remembered_sets_);
   STLDeleteElements(&continuous_spaces_);
   STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
+  delete heap_trim_request_lock_;
   VLOG(heap) << "Finished ~Heap()";
 }
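The destructor cleanup above relies on stl_util-style helpers that delete owned raw pointers before a container is destroyed. For reference, a minimal sketch of their conventional semantics (an assumption modeled on Google's stl_util; not code from this patch):

    // Minimal sketch, assuming the usual stl_util semantics: delete every
    // owned pointer, then clear the container.
    template <typename Map>
    void STLDeleteValues(Map* m) {
      for (auto& kv : *m) {
        delete kv.second;  // e.g. each table in mod_union_tables_
      }
      m->clear();
    }

    template <typename Container>
    void STLDeleteElements(Container* c) {
      for (auto* element : *c) {
        delete element;    // e.g. each space in continuous_spaces_
      }
      c->clear();
    }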
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 1ca132e..2fc67ec 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -29,6 +29,50 @@ namespace art {
 namespace gc {
 namespace space {
 
+class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+ public:
+  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+  }
+
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                size_t* usable_size) OVERRIDE {
+    mirror::Object* obj =
+        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+                                   usable_size);
+    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+                               kValgrindRedZoneBytes);
+    if (usable_size != nullptr) {
+      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
+    }
+    return object_without_rdz;
+  }
+
+  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
+  }
+
+  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+    return LargeObjectMapSpace::Free(self, object_with_rdz);
+  }
+
+  bool Contains(const mirror::Object* obj) const OVERRIDE {
+    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+    return LargeObjectMapSpace::Contains(object_with_rdz);
+  }
+
+ private:
+  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+};
+
 void LargeObjectSpace::SwapBitmaps() {
   live_objects_.swap(mark_objects_);
   // Swap names to get more descriptive diagnostics.
@@ -53,7 +97,11 @@ LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
       lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
-  return new LargeObjectMapSpace(name);
+  if (RUNNING_ON_VALGRIND > 0) {
+    return new ValgrindLargeObjectMapSpace(name);
+  } else {
+    return new LargeObjectMapSpace(name);
+  }
 }
 
 mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
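Taken together, the wrapper's job is pure address arithmetic around the parent space. Below is a standalone sketch of that wrap/unwrap pattern, under stated assumptions: RedZoneAlloc and RedZoneFree are hypothetical names, std::malloc stands in for the space's page mappings, and kRedZoneBytes plays the role of kValgrindRedZoneBytes; only the client macros from valgrind/memcheck.h are the real API.

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <valgrind/memcheck.h>

    static constexpr std::size_t kRedZoneBytes = 4096;  // stand-in for kPageSize

    // Over-allocate by two red zones, poison them, and return the interior,
    // mirroring ValgrindLargeObjectMapSpace::Alloc() above.
    void* RedZoneAlloc(std::size_t num_bytes) {
      std::uint8_t* base =
          static_cast<std::uint8_t*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
      std::uint8_t* payload = base + kRedZoneBytes;
      VALGRIND_MAKE_MEM_NOACCESS(base, kRedZoneBytes);                 // leading red zone
      VALGRIND_MAKE_MEM_NOACCESS(payload + num_bytes, kRedZoneBytes);  // trailing red zone
      return payload;
    }

    // Shift the caller's pointer back to the true base and un-poison the
    // whole region before releasing it, as Free() above does with
    // VALGRIND_MAKE_MEM_UNDEFINED.
    void RedZoneFree(void* payload, std::size_t num_bytes) {
      std::uint8_t* base = static_cast<std::uint8_t*>(payload) - kRedZoneBytes;
      VALGRIND_MAKE_MEM_UNDEFINED(base, num_bytes + 2 * kRedZoneBytes);
      std::free(base);
    }

Every public entry point must agree on that offset: AllocationSize(), Free(), and Contains() all shift the incoming pointer back by one red zone before delegating to LargeObjectMapSpace. The header changes below make the delegation possible: LargeObjectMapSpace loses FINAL, its constructor and members move from private to protected so the wrapper can subclass it, and Create() selects the wrapper at runtime via RUNNING_ON_VALGRIND.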
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b1b0c3c..eb01325 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -91,7 +91,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 };
 
 // A discontinuous large object space implemented by individual mmap/munmap calls.
-class LargeObjectMapSpace FINAL : public LargeObjectSpace {
+class LargeObjectMapSpace : public LargeObjectSpace {
  public:
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
@@ -106,7 +106,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
- private:
+ protected:
   explicit LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
@@ -115,7 +115,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
   std::vector<mirror::Object*,
               accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
   typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
-                  accounting::GcAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
+                  accounting::GcAllocator<std::pair<mirror::Object*, MemMap*> > > MemMaps;
   MemMaps mem_maps_ GUARDED_BY(lock_);
 };
@@ -150,7 +150,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
   void Dump(std::ostream& os) const;
 
- private:
+ protected:
   static const size_t kAlignment = kPageSize;
 
   class AllocationHeader {