summaryrefslogtreecommitdiffstats
path: root/runtime/mem_map_test.cc
diff options
context:
space:
mode:
authorHiroshi Yamauchi <yamauchi@google.com>2014-06-04 11:43:59 -0700
committerHiroshi Yamauchi <yamauchi@google.com>2014-06-04 15:50:46 -0700
commit3eed93dd5be03e5539827bebf0f414251a12e15e (patch)
tree186fa74420e2595c9b6fba9ea8de94756af9cef5 /runtime/mem_map_test.cc
parentbbdc5bc5fd5141711879a6c85d80ac45b7aad5d0 (diff)
downloadart-3eed93dd5be03e5539827bebf0f414251a12e15e.zip
art-3eed93dd5be03e5539827bebf0f414251a12e15e.tar.gz
art-3eed93dd5be03e5539827bebf0f414251a12e15e.tar.bz2
Verify there's no mem map gap for immune region not to break.
This adds code that verifies that there's no memory map gap between the image space and the main space so that the immune region functionality won't silently break. For example, if there's a gap and a large object is allocated in that gap, the large object is incorrectly part of the immune region and the marking breaks. Bug: 14059466 Change-Id: Ie6ed82988d74b6d0562ebbbaac96ee43c15b14a6
Diffstat (limited to 'runtime/mem_map_test.cc')
-rw-r--r--runtime/mem_map_test.cc61
1 file changed, 61 insertions, 0 deletions
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index c108a5f..fe76c92 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -250,4 +250,65 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
}
#endif
+// Exercises MemMap::CheckNoGaps: three page-sized maps laid out back-to-back
+// at a known base must be reported gap-free (for any begin/end pair among
+// them), and unmapping the interior page must make CheckNoGaps fail.
+TEST_F(MemMapTest, CheckNoGaps) {
+  std::string error_msg;
+  constexpr size_t kNumPages = 3;
+  // Map a 3-page mem map.
+  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
+                                                   nullptr,
+                                                   kPageSize * kNumPages,
+                                                   PROT_READ | PROT_WRITE,
+                                                   false,
+                                                   &error_msg));
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  // Record the base address.
+  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
+  // Unmap it.
+  map.reset();
+
+  // Map at the same address, but in page-sized separate mem maps,
+  // assuming the space at the address is still available.
+  // NOTE(review): this assumes nothing else maps into
+  // [map_base, map_base + 3 pages) between the unmap above and the remaps
+  // below -- theoretically racy, though single-threaded in practice here.
+  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
+                                                    map_base,
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
+                                                    map_base + kPageSize,
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
+                                                    map_base + kPageSize * 2,
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+
+  // One-map cases.
+  // A single map trivially has no gap with itself.
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+
+  // Two or three-map cases.
+  // The three maps are contiguous by construction, so every pair spanning
+  // them must be gap-free.
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+
+  // Unmap the middle one.
+  map1.reset();
+
+  // Should return false now that there's a gap in the middle.
+  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+}
+
} // namespace art