 runtime/base/mutex.cc   |  5
 runtime/base/mutex.h    |  4
 runtime/gc/heap.cc      | 11
 runtime/mem_map.cc      | 95
 runtime/mem_map.h       | 24
 runtime/mem_map_test.cc | 61
 6 files changed, 195 insertions, 5 deletions
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index d2b4e01..11698e2 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -35,6 +35,7 @@ Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
@@ -900,6 +901,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
+ DCHECK(mem_maps_lock_ == nullptr);
+ mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 522692e..68b450a 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -54,6 +54,7 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kMemMapsLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
@@ -573,6 +574,9 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the maps in mem_map.
+ static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
};
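
The new kMemMapsLock level sits immediately above kLoggingLock, so a thread holding mem_maps_lock_ may still acquire the logging lock; that ordering is what allows the map-dumping and CHECK paths later in this change to log while the lock is held. A minimal sketch of the intended acquisition pattern, using only the MutexLock guard and Thread::Current() that appear elsewhere in this diff:

    // Sketch: scoped acquisition of the new lock. MutexLock releases the
    // lock when `mu` goes out of scope.
    {
      MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
      // kLoggingLock is the only level below kMemMapsLock, so logging is
      // still permitted here without violating the lock hierarchy.
      LOG(INFO) << "inspecting the MemMap registry under mem_maps_lock_";
    }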
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a6093ca..1efabff 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -341,6 +341,17 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
garbage_collectors_.push_back(concurrent_copying_collector_);
}
+ if (GetImageSpace() != nullptr && main_space_ != nullptr) {
+ // Check that there's no gap between the image space and the main
+ // space so that the immune region won't break (e.g. due to a large
+ // object allocated in the gap).
+ bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
+ if (!no_gap) {
+ MemMap::DumpMaps(LOG(ERROR));
+ LOG(FATAL) << "There's a gap between the image space and the main space";
+ }
+ }
+
if (running_on_valgrind_) {
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
}
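
The failure mode this guards against: the GC's immune region is a single contiguous address range spanning the image space and the main space, so any unrelated mapping that lands in a gap between them falls inside that range. An illustrative model of the hazard, assuming the immune region behaves like a half-open interval (the ImmuneRegion type and fields here are hypothetical stand-ins, not ART's actual class):

    // Hypothetical model: an immune region as a half-open interval.
    struct ImmuneRegion {
      uintptr_t begin;  // start of the image space
      uintptr_t end;    // end of the main space
      bool Contains(uintptr_t addr) const {
        return begin <= addr && addr < end;
      }
    };
    // A large object mmapped into a gap between the two spaces would
    // satisfy Contains() despite belonging to neither space, so the GC
    // would wrongly treat it as immune. CheckNoGaps() turns that silent
    // corruption into a LOG(FATAL) at startup.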
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 49e0b54..859269a 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -15,6 +15,7 @@
*/
#include "mem_map.h"
+#include "thread-inl.h"
#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
@@ -51,6 +52,19 @@ static std::ostream& operator<<(
return os;
}
+std::ostream& operator<<(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
+ os << "MemMap:" << std::endl;
+ for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
+ void* base = it->first;
+ MemMap* map = it->second;
+ CHECK_EQ(base, map->BaseBegin());
+ os << *map << std::endl;
+ }
+ return os;
+}
+
+std::multimap<void*, MemMap*> MemMap::maps_;
+
#if defined(__LP64__) && !defined(__x86_64__)
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
@@ -351,6 +365,19 @@ MemMap::~MemMap() {
if (result == -1) {
PLOG(FATAL) << "munmap failed";
}
+
+ // Remove it from maps_.
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ bool found = false;
+ for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
+ it != end && it->first == base_begin_; ++it) {
+ if (it->second == this) {
+ found = true;
+ maps_.erase(it);
+ break;
+ }
+ }
+ CHECK(found) << "MemMap not found";
}
MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
@@ -365,6 +392,10 @@ MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_beg
CHECK(begin_ != nullptr);
CHECK(base_begin_ != nullptr);
CHECK_NE(base_size_, 0U);
+
+ // Add it to maps_.
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
}
};
@@ -453,10 +484,68 @@ bool MemMap::Protect(int prot) {
return false;
}
+bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ CHECK(begin_map != nullptr);
+ CHECK(end_map != nullptr);
+ CHECK(HasMemMap(begin_map));
+ CHECK(HasMemMap(end_map));
+ CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
+ MemMap* map = begin_map;
+ while (map->BaseBegin() != end_map->BaseBegin()) {
+ MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
+ if (next_map == nullptr) {
+ // Found a gap.
+ return false;
+ }
+ map = next_map;
+ }
+ return true;
+}
+
+void MemMap::DumpMaps(std::ostream& os) {
+ DumpMaps(os, maps_);
+}
+
+void MemMap::DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ DumpMapsLocked(os, mem_maps);
+}
+
+void MemMap::DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
+ os << mem_maps;
+}
+
+bool MemMap::HasMemMap(MemMap* map) {
+ void* base_begin = map->BaseBegin();
+ for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
+ it != end && it->first == base_begin; ++it) {
+ if (it->second == map) {
+ return true;
+ }
+ }
+ return false;
+}
+
+MemMap* MemMap::GetLargestMemMapAt(void* address) {
+ size_t largest_size = 0;
+ MemMap* largest_map = nullptr;
+ for (auto it = maps_.lower_bound(address), end = maps_.end();
+ it != end && it->first == address; ++it) {
+ MemMap* map = it->second;
+ CHECK(map != nullptr);
+ if (largest_size < map->BaseSize()) {
+ largest_size = map->BaseSize();
+ largest_map = map;
+ }
+ }
+ return largest_map;
+}
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
- os << StringPrintf("[MemMap: %s prot=0x%x %p-%p]",
-                    mem_map.GetName().c_str(), mem_map.GetProtect(),
-                    mem_map.BaseBegin(), mem_map.BaseEnd());
+ os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
+                    mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
+                    mem_map.GetName().c_str());
return os;
}
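
maps_ is a multimap rather than a map because the reserve-and-divide pattern noted in mem_map.h below can leave more than one MemMap registered at the same base address; the destructor, HasMemMap() and GetLargestMemMapAt() above therefore all walk every entry with an equal key. That walk, in isolation:

    // The equal-key scan used throughout this file: lower_bound() finds
    // the first entry keyed at `addr`, and the loop stops at the first
    // entry with a different key.
    for (auto it = maps_.lower_bound(addr);
         it != maps_.end() && it->first == addr; ++it) {
      MemMap* map = it->second;
      // ... compare, erase, or measure `map` here ...
    }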
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 1411856..dc5909b 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -17,7 +17,10 @@
#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_
+#include "base/mutex.h"
+
#include <string>
+#include <map>
#include <stddef.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
@@ -66,7 +69,7 @@ class MemMap {
std::string* error_msg);
// Releases the memory mapping
- ~MemMap();
+ ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
const std::string& GetName() const {
return name_;
@@ -110,9 +113,23 @@ class MemMap {
MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
std::string* error_msg);
+ static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
+ LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ static void DumpMaps(std::ostream& os)
+ LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
private:
MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
- int prot);
+ int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
+ static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
+ LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ static void DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+ static bool HasMemMap(MemMap* map)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
+ static MemMap* GetLargestMemMapAt(void* address)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
std::string name_;
byte* const begin_; // Start of data.
@@ -126,6 +143,9 @@ class MemMap {
static uintptr_t next_mem_pos_; // next memory location to check for low_4g extent
#endif
+ // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
+ static std::multimap<void*, MemMap*> maps_ GUARDED_BY(Locks::mem_maps_lock_);
+
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
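
The annotations split the API along lock-discipline lines: LOCKS_EXCLUDED(L) marks a function that acquires L itself, so callers must not already hold it, while EXCLUSIVE_LOCKS_REQUIRED(L) marks a function that may only run with L held. A sketch of the two calling conventions (illustrative only; the *Locked helpers are private, so the second form is valid only inside MemMap itself):

    // LOCKS_EXCLUDED flavor: takes mem_maps_lock_ internally.
    MemMap::DumpMaps(os);

    // EXCLUSIVE_LOCKS_REQUIRED flavor: caller holds the lock already.
    {
      MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
      DumpMapsLocked(os, maps_);
    }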
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index c108a5f..fe76c92 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -250,4 +250,65 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
}
#endif
+TEST_F(MemMapTest, CheckNoGaps) {
+ std::string error_msg;
+ constexpr size_t kNumPages = 3;
+ // Map a 3-page mem map.
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
+ nullptr,
+ kPageSize * kNumPages,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ // Record the base address.
+ byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
+ // Unmap it.
+ map.reset();
+
+ // Map at the same address, but in page-sized separate mem maps,
+ // assuming the space at the address is still available.
+ std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
+ map_base,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
+ map_base + kPageSize,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
+ map_base + kPageSize * 2,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+
+ // One-map cases.
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+
+ // Two- and three-map cases.
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
+ ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+
+ // Unmap the middle one.
+ map1.reset();
+
+ // Should return false now that there's a gap in the middle.
+ ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+}
+
} // namespace art
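
Taken together, the change is a small registry pattern: register each mapping on construction, unregister on destruction, and answer adjacency queries by walking registered ranges end-to-start. A self-contained plain-C++ sketch of that pattern (the names are illustrative, not ART's; the real code also locks the registry and prefers the largest map at each address):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    // Stand-in for MemMap: a registered [base, base + size) range.
    struct Range {
      void* base;
      size_t size;
      void* End() const { return static_cast<uint8_t*>(base) + size; }
    };

    // Stand-in for MemMap::maps_, keyed by base address.
    static std::multimap<void*, Range*> registry;

    // Walks from `begin` toward `end` through ranges that start exactly
    // where the previous one stops; any hole is reported as a gap.
    bool CheckNoGaps(Range* begin, Range* end) {
      Range* cur = begin;
      while (cur->base != end->base) {
        auto it = registry.find(cur->End());  // a range starting at cur's end?
        if (it == registry.end()) {
          return false;  // nothing adjacent: found a gap
        }
        cur = it->second;
      }
      return true;
    }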