summaryrefslogtreecommitdiffstats
path: root/runtime/indirect_reference_table.h
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2014-05-04 13:18:58 -0700
committerMathieu Chartier <mathieuc@google.com>2014-05-05 15:46:09 -0700
commitc56057e40938c587a74984651a510e320a8cb4fd (patch)
tree6595bb7aa27ecf91dc9121d25722b40dec803ee6 /runtime/indirect_reference_table.h
parent0b8027003514c4fa6a850e5087076e991daaf4c3 (diff)
downloadart-c56057e40938c587a74984651a510e320a8cb4fd.zip
art-c56057e40938c587a74984651a510e320a8cb4fd.tar.gz
art-c56057e40938c587a74984651a510e320a8cb4fd.tar.bz2
Add lockless SynchronizedGet for indirect reference table.
Used for decoding global references without holding locks. Results on JniCallback: Before: 615ms (3 samples). After: 585ms (3 samples). Change-Id: Ifcac8d0359cf658d87f695c6eb869d148af002e5
Diffstat (limited to 'runtime/indirect_reference_table.h')
-rw-r--r--  runtime/indirect_reference_table.h  30
1 file changed, 19 insertions, 11 deletions
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index a2de726..f365acc 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -24,6 +24,7 @@
#include "base/logging.h"
#include "base/mutex.h"
+#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -72,7 +73,7 @@ class Object;
* To make everything fit nicely in 32-bit integers, the maximum size of
* the table is capped at 64K.
*
- * None of the table functions are synchronized.
+ * Only SynchronizedGet is synchronized.
*/
/*
@@ -191,11 +192,6 @@ static const uint32_t IRT_FIRST_SEGMENT = 0;
* and local refs to improve performance. A large circular buffer might
* reduce the amortized cost of adding global references.
*
- * TODO: if we can guarantee that the underlying storage doesn't move,
- * e.g. by using oversized mmap regions to handle expanding tables, we may
- * be able to avoid having to synchronize lookups. Might make sense to
- * add a "synchronized lookup" call that takes the mutex as an argument,
- * and either locks or doesn't lock based on internal details.
*/
union IRTSegmentState {
uint32_t all;
@@ -234,7 +230,7 @@ class IrtIterator {
}
}
- mirror::Object** table_;
+ mirror::Object** const table_;
size_t i_;
size_t capacity_;
};
@@ -267,10 +263,15 @@ class IndirectReferenceTable {
*
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
- mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // TODO: remove when we remove work_around_app_jni_bugs support.
- bool ContainsDirectPointer(mirror::Object* direct_pointer) const;
+ mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE;
+
+ // Synchronized get which reads a reference, acquiring a lock if necessary.
+ mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
+ IndirectRef iref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return Get(iref);
+ }
/*
* Remove an existing entry.
@@ -351,6 +352,9 @@ class IndirectReferenceTable {
}
}
+ // Abort if check_jni is not enabled.
+ static void AbortIfNoCheckJNI();
+
/* extra debugging checks */
bool GetChecked(IndirectRef) const;
bool CheckEntry(const char*, IndirectRef, int) const;
@@ -358,6 +362,10 @@ class IndirectReferenceTable {
/* semi-public - read/write by jni down calls */
IRTSegmentState segment_state_;
+ // Mem map where we store the indirect refs.
+ UniquePtr<MemMap> table_mem_map_;
+ // Mem map where we store the extended debugging info.
+ UniquePtr<MemMap> slot_mem_map_;
/* bottom of the stack */
mirror::Object** table_;
/* bit mask, ORed into all irefs */