summary refs log tree commit diff stats
path: root/runtime/mem_map.cc
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/mem_map.cc')
-rw-r--r--  runtime/mem_map.cc  69
1 files changed, 65 insertions, 4 deletions
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 00316f7..70d3457 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -170,12 +170,73 @@ MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_beg
}
};
-void MemMap::UnMapAtEnd(byte* new_end) {
+MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+ std::string* error_msg) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
- size_t unmap_size = End() - new_end;
- munmap(new_end, unmap_size);
- size_ -= unmap_size;
+ DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+ DCHECK(IsAligned<kPageSize>(begin_));
+ DCHECK(IsAligned<kPageSize>(base_begin_));
+ DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+ DCHECK(IsAligned<kPageSize>(new_end));
+ byte* old_end = begin_ + size_;
+ byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
+ byte* new_base_end = new_end;
+ DCHECK_LE(new_base_end, old_base_end);
+ if (new_base_end == old_base_end) {
+ return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
+ }
+ size_ = new_end - reinterpret_cast<byte*>(begin_);
+ base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
+ DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+ size_t tail_size = old_end - new_end;
+ byte* tail_base_begin = new_base_end;
+ size_t tail_base_size = old_base_end - new_base_end;
+ DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
+ DCHECK(IsAligned<kPageSize>(tail_base_size));
+
+#ifdef USE_ASHMEM
+ // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
+ // prefixed "dalvik-".
+ std::string debug_friendly_name("dalvik-");
+ debug_friendly_name += tail_name;
+ ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
+ int flags = MAP_PRIVATE;
+ if (fd.get() == -1) {
+ *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
+ tail_name, strerror(errno));
+ return nullptr;
+ }
+#else
+ ScopedFd fd(-1);
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#endif
+
+ // Unmap/map the tail region.
+ int result = munmap(tail_base_begin, tail_base_size);
+ if (result == -1) {
+ std::string maps;
+ ReadFileToString("/proc/self/maps", &maps);
+ *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
+ tail_base_begin, tail_base_size, name_.c_str(),
+ maps.c_str());
+ return nullptr;
+ }
+ // Don't cause memory allocation between the munmap and the mmap
+ // calls. Otherwise, libc (or something else) might take this memory
+ // region. Note this isn't perfect as there's no way to prevent
+ // other threads to try to take this memory region here.
+ byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+ flags, fd.get(), 0));
+ if (actual == MAP_FAILED) {
+ std::string maps;
+ ReadFileToString("/proc/self/maps", &maps);
+ *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
+ tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
+ maps.c_str());
+ return nullptr;
+ }
+ return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}
bool MemMap::Protect(int prot) {