path: root/runtime/monitor_pool.cc
author     Andreas Gampe <agampe@google.com>  2014-04-17 10:35:09 -0700
committer  Andreas Gampe <agampe@google.com>  2014-07-10 00:51:04 -0700
commit     74240819ae09e29b2753ef38f4eb4be1c2762e2e (patch)
tree       61e2d3aa7268ce49fe77715593896f59feb92fe6  /runtime/monitor_pool.cc
parent     32710dd4a0232149002a5ae7bde1c640cdffd564 (diff)
Use memory chunks for monitors on LP64
Monitor IDs in lock words are only 30 bits. On a 32-bit system that works fine, as memory is usually aligned enough that shifting works out. On 64-bit systems, the virtual address space is too large for that.

This adds memory chunks into which we allocate the monitors, so that we have base_addr + offset and can use the offset as the monitor ID. To allow for relatively compact but growable storage, we use a list of chunks.

Added a global lock for the monitor pool.

Change-Id: I0e290c4914a2556e0b2eef9902422d7c4dcf536d
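The base_addr + offset scheme above can be illustrated with a small standalone sketch. The names and constants below (kSketchMonitorSize, kSketchChunkSize, IdToAddress, and so on) are hypothetical stand-ins chosen for illustration, not the helpers defined in monitor_pool.h; the sketch only shows why an offset into a list of chunks stays small enough for the 30-bit monitor-id field even in a 64-bit virtual address space.

    // Sketch only: a monitor id is the monitor's byte offset within the pool's
    // logical chunk layout, so it is independent of the 64-bit addresses the
    // chunks actually land on. Constants are assumed values, not ART's.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kSketchMonitorSize = 64;    // assumed aligned slot size per monitor
    constexpr size_t kSketchChunkSize = 4096;    // assumed size of one allocation chunk

    using SketchMonitorId = uint32_t;

    SketchMonitorId OffsetToId(size_t offset) { return static_cast<SketchMonitorId>(offset); }
    size_t IdToOffset(SketchMonitorId id) { return static_cast<size_t>(id); }

    // Id of the i-th monitor slot in the chunk with index chunk_index.
    SketchMonitorId SlotToId(size_t chunk_index, size_t i) {
      return OffsetToId(chunk_index * kSketchChunkSize + i * kSketchMonitorSize);
    }

    // Resolve an id back to an address: pick the chunk, then add the in-chunk offset.
    void* IdToAddress(const std::vector<uintptr_t>& chunk_bases, SketchMonitorId id) {
      size_t offset = IdToOffset(id);
      size_t chunk_index = offset / kSketchChunkSize;  // which chunk holds the monitor
      size_t in_chunk = offset % kSketchChunkSize;     // where it sits inside that chunk
      return reinterpret_cast<void*>(chunk_bases[chunk_index] + in_chunk);
    }

With this encoding the ids grow only with the amount of pool memory actually allocated, so even thousands of monitors use offsets far below 2^30, no matter where the chunks are placed in the address space.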
Diffstat (limited to 'runtime/monitor_pool.cc')
-rw-r--r--  runtime/monitor_pool.cc  128
1 file changed, 105 insertions(+), 23 deletions(-)
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index eb7525a..440a6be 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -23,36 +23,118 @@
namespace art {
-MonitorPool::MonitorPool() : allocated_ids_lock_("allocated monitor ids lock",
- LockLevel::kMonitorPoolLock) {
-}
+namespace mirror {
+ class Object;
+} // namespace mirror
-Monitor* MonitorPool::LookupMonitorFromTable(MonitorId mon_id) {
- ReaderMutexLock mu(Thread::Current(), allocated_ids_lock_);
- return table_.Get(mon_id);
+MonitorPool::MonitorPool()
+ : num_chunks_(0), capacity_(0), first_free_(nullptr) {
+ AllocateChunk(); // Get our first chunk.
}
-MonitorId MonitorPool::AllocMonitorIdFromTable(Thread* self, Monitor* mon) {
- WriterMutexLock mu(self, allocated_ids_lock_);
- for (size_t i = 0; i < allocated_ids_.size(); ++i) {
- if (!allocated_ids_[i]) {
- allocated_ids_.set(i);
- MonitorId mon_id = i + 1; // Zero is reserved to mean "invalid".
- table_.Put(mon_id, mon);
- return mon_id;
+// Assumes locks are held appropriately when necessary.
+// We do not need a lock in the constructor, but we do need one when called from CreateMonitorInPool.
+void MonitorPool::AllocateChunk() {
+ DCHECK(first_free_ == nullptr);
+
+ // Do we need to resize?
+ if (num_chunks_ == capacity_) {
+ if (capacity_ == 0U) {
+ // Initialization.
+ capacity_ = kInitialChunkStorage;
+ uintptr_t* new_backing = new uintptr_t[capacity_];
+ monitor_chunks_.StoreRelaxed(new_backing);
+ } else {
+ size_t new_capacity = 2 * capacity_;
+ uintptr_t* new_backing = new uintptr_t[new_capacity];
+ uintptr_t* old_backing = monitor_chunks_.LoadRelaxed();
+ memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_);
+ monitor_chunks_.StoreRelaxed(new_backing);
+ capacity_ = new_capacity;
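+      // Stash the old backing array rather than freeing it; readers may still
+      // be reaching it through monitor_chunks_ without holding this lock.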
+ old_chunk_arrays_.push_back(old_backing);
+ LOG(INFO) << "Resizing to capacity " << capacity_;
}
}
- LOG(FATAL) << "Out of internal monitor ids";
- return 0;
+
+ // Allocate the chunk.
+ void* chunk = malloc(kChunkSize);
+ // Check we allocated memory.
+ CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
+ // Check it is aligned as we need it.
+ CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);
+
+ // Add the chunk.
+  *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
+ num_chunks_++;
+
+ // Set up the free list
+ Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
+ (kChunkCapacity - 1) * kAlignedMonitorSize);
+ last->next_free_ = nullptr;
+ // Eagerly compute id.
+ last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize +
+ (kChunkCapacity - 1) * kAlignedMonitorSize);
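+  // Chain the remaining slots in front of 'last', walking backwards so that
+  // 'last' ends up pointing at the first slot in the chunk.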
+ for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
+ Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
+ kAlignedMonitorSize);
+ before->next_free_ = last;
+ // Derive monitor_id from last.
+ before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
+ kAlignedMonitorSize);
+
+ last = before;
+ }
+ DCHECK(last == reinterpret_cast<Monitor*>(chunk));
+ first_free_ = last;
+}
+
+Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
+ int32_t hash_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // We are going to allocate, so acquire the monitor ids lock.
+ MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
+
+ // Enough space, or need to resize?
+ if (first_free_ == nullptr) {
+ LOG(INFO) << "Allocating a new chunk.";
+ AllocateChunk();
+ }
+
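+  // Pop the head of the free list.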
+ Monitor* mon_uninitialized = first_free_;
+ first_free_ = first_free_->next_free_;
+
+ // Pull out the id which was preinitialized.
+ MonitorId id = mon_uninitialized->monitor_id_;
+
+ // Initialize it.
+ Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);
+
+ return monitor;
}
-void MonitorPool::ReleaseMonitorIdFromTable(MonitorId mon_id) {
- WriterMutexLock mu(Thread::Current(), allocated_ids_lock_);
- DCHECK(table_.Get(mon_id) != nullptr);
- table_.erase(mon_id);
- --mon_id; // Zero is reserved to mean "invalid".
- DCHECK(allocated_ids_[mon_id]) << mon_id;
- allocated_ids_.reset(mon_id);
+void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
+  // Might race with allocation, so acquire the lock.
+ MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
+
+  // Save the monitor id; don't trust the destructor not to clear it.
+ MonitorId id = monitor->monitor_id_;
+
+ // Call the destructor.
+ // TODO: Exception safety?
+ monitor->~Monitor();
+
+ // Add to the head of the free list.
+ monitor->next_free_ = first_free_;
+ first_free_ = monitor;
+
+ // Rewrite monitor id.
+ monitor->monitor_id_ = id;
+}
+
+void MonitorPool::ReleaseMonitorsToPool(Thread* self, std::list<Monitor*>* monitors) {
+ for (Monitor* mon : *monitors) {
+ ReleaseMonitorToPool(self, mon);
+ }
}
} // namespace art