author     Mathieu Chartier <mathieuc@google.com>    2013-07-15 16:32:50 -0700
committer  Mathieu Chartier <mathieuc@google.com>    2013-07-15 17:16:37 -0700
commit     4b95e8fad803ad307fa09c11c08894544e07a731 (patch)
tree       1415e3c30d673aeba09e20afe567cc757af124f0
parent     57a77c8650e84aefbc9985350989242296a18e00 (diff)
Refactor atomic integer.
Refactored atomic integer to be similar to C++11 std::atomic<int>.

Removed the JDWP serial lock and reverted the lock level name change from
https://googleplex-android-review.googlesource.com/#/c/327297/

Change-Id: I2229f30c4d5762a0e8c72697d6aca4683750af35
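In practice the rename maps old call sites to the new std::atomic-style names as follows (an illustrative sketch; the counter is hypothetical, the method names come from the diff below):

    AtomicInteger counter(0);
    counter.store(42);                  // was: counter = 42 (operator= remains, forwarding to store)
    int32_t v = counter.load();         // was: counter.get() (implicit int32_t conversion also remains)
    counter.fetch_add(8);               // was: counter += 8; returns the old value
    counter.fetch_sub(8);               // was: counter -= 8
    counter.compare_and_swap(42, 50);   // was: counter.CompareAndSwap(42, 50)

Note that operator|= and operator&= are dropped outright; no fetch_or/fetch_and replacement is added in this change.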
-rw-r--r--  runtime/atomic_integer.h              40
-rw-r--r--  runtime/gc/accounting/atomic_stack.h  12
-rw-r--r--  runtime/gc/collector/mark_sweep.cc    16
-rw-r--r--  runtime/gc/heap.cc                    12
-rw-r--r--  runtime/jdwp/jdwp.h                    6
-rw-r--r--  runtime/jdwp/jdwp_main.cc              5
-rw-r--r--  runtime/locks.h                        2
-rw-r--r--  runtime/thread_pool_test.cc            2
8 files changed, 48 insertions(+), 47 deletions(-)
diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h
index c4a8de9..324d08b 100644
--- a/runtime/atomic_integer.h
+++ b/runtime/atomic_integer.h
@@ -29,47 +29,51 @@ class AtomicInteger {
AtomicInteger(int32_t value) : value_(value) { }
// Unsafe = operator for non atomic operations on the integer.
- AtomicInteger& operator = (int32_t new_value) {
- value_ = new_value;
+ void store(int32_t desired) {
+ value_ = desired;
+ }
+
+ AtomicInteger& operator=(int32_t desired) {
+ store(desired);
return *this;
}
- operator int32_t () const {
+ int32_t load() const {
return value_;
}
- int32_t get() const {
- return value_;
+ operator int32_t() const {
+ return load();
}
- int32_t operator += (const int32_t value) {
+ int32_t fetch_add(const int32_t value) {
return android_atomic_add(value, &value_);
}
- int32_t operator -= (const int32_t value) {
+ int32_t fetch_sub(const int32_t value) {
return android_atomic_add(-value, &value_);
}
- int32_t operator |= (const int32_t value) {
- return android_atomic_or(value, &value_);
+ int32_t operator++() {
+ return android_atomic_inc(&value_) + 1;
}
- int32_t operator &= (const int32_t value) {
- return android_atomic_and(-value, &value_);
+ int32_t operator++(int32_t) {
+ return android_atomic_inc(&value_);
}
- int32_t operator ++ () {
- return android_atomic_inc(&value_) + 1;
+ int32_t operator--() {
+ return android_atomic_dec(&value_) - 1;
}
- int32_t operator -- () {
- return android_atomic_dec(&value_) - 1;
+ int32_t operator--(int32_t) {
+ return android_atomic_dec(&value_);
}
- bool CompareAndSwap(int expected_value, int new_value) {
- bool success = android_atomic_cas(expected_value, new_value, &value_) == 0;
- return success;
+ bool compare_and_swap(int32_t expected_value, int32_t desired_value) {
+ return android_atomic_cas(expected_value, desired_value, &value_) == 0;
}
+
private:
volatile int32_t value_;
};
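The refactored surface intentionally tracks C++11 std::atomic<int32_t>. For comparison, the same operations against the standard type would look like this (an illustrative sketch, not part of this change):

    #include <atomic>

    std::atomic<int32_t> value(0);
    value.store(1);                               // AtomicInteger::store
    int32_t v = value.load();                     // AtomicInteger::load
    value.fetch_add(2);                           // returns the old value, as here
    value.fetch_sub(1);                           // AtomicInteger::fetch_sub
    int32_t expected = 2;
    value.compare_exchange_strong(expected, 3);   // analogue of compare_and_swap,
                                                  // but updates 'expected' on failure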
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 4e1c253..054dced 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -66,7 +66,7 @@ class AtomicStack {
// Stack overflow.
return false;
}
- } while(!back_index_.CompareAndSwap(index, index + 1));
+ } while(!back_index_.compare_and_swap(index, index + 1));
begin_[index] = value;
return true;
}
@@ -89,7 +89,7 @@ class AtomicStack {
// Take an item from the front of the stack.
T PopFront() {
int32_t index = front_index_;
- DCHECK_LT(index, back_index_.get());
+ DCHECK_LT(index, back_index_.load());
front_index_ = front_index_ + 1;
return begin_[index];
}
@@ -123,12 +123,12 @@ class AtomicStack {
void Sort() {
if (!is_sorted_) {
- int32_t start_back_index = back_index_.get();
- int32_t start_front_index = front_index_.get();
+ int32_t start_back_index = back_index_.load();
+ int32_t start_front_index = front_index_.load();
is_sorted_ = true;
std::sort(Begin(), End());
- CHECK_EQ(start_back_index, back_index_.get());
- CHECK_EQ(start_front_index, front_index_.get());
+ CHECK_EQ(start_back_index, back_index_.load());
+ CHECK_EQ(start_front_index, front_index_.load());
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 79a571b..7664657 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -852,8 +852,8 @@ void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
// AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
heap->RecordFree(freed_objects, freed_bytes);
- mark_sweep->freed_objects_ += freed_objects;
- mark_sweep->freed_bytes_ += freed_bytes;
+ mark_sweep->freed_objects_.fetch_add(freed_objects);
+ mark_sweep->freed_bytes_.fetch_add(freed_bytes);
}
void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
@@ -918,8 +918,8 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
VLOG(heap) << "Freed " << freed_objects << "/" << count
<< " objects with size " << PrettySize(freed_bytes);
heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
- freed_objects_ += freed_objects;
- freed_bytes_ += freed_bytes;
+ freed_objects_.fetch_add(freed_objects);
+ freed_bytes_.fetch_add(freed_bytes);
timings_.NewSplit("ResetStack");
allocations->Reset();
@@ -997,8 +997,8 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
++freed_objects;
}
}
- freed_objects_ += freed_objects;
- freed_bytes_ += freed_bytes;
+ freed_objects_.fetch_add(freed_objects);
+ freed_bytes_.fetch_add(freed_bytes);
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
@@ -1205,7 +1205,7 @@ class MarkStackChunk : public Task {
thread_pool_->AddTask(Thread::Current(), output_);
output_ = NULL;
if (kMeasureOverhead) {
- mark_sweep_->overhead_time_ += NanoTime() - start;
+ mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
}
}
}
@@ -1217,7 +1217,7 @@ class MarkStackChunk : public Task {
}
output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
if (kMeasureOverhead) {
- mark_sweep_->overhead_time_ += NanoTime() - start;
+ mark_sweep_->overhead_time_.fetch_add(NanoTime() - start);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index eae1520..fe0b740 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -580,7 +580,7 @@ mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_co
VerifyObject(obj);
if (measure_allocation_time_) {
- total_allocation_time_ += NanoTime() / kTimeAdjust - allocation_start;
+ total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
}
return obj;
@@ -729,7 +729,7 @@ void Heap::VerifyHeap() {
void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
DCHECK(obj != NULL);
DCHECK_GT(size, 0u);
- num_bytes_allocated_ += size;
+ num_bytes_allocated_.fetch_add(size);
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
@@ -751,7 +751,7 @@ void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
- num_bytes_allocated_ -= freed_bytes;
+ num_bytes_allocated_.fetch_sub(freed_bytes);
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
@@ -1984,7 +1984,7 @@ bool Heap::IsGCRequestPending() const {
void Heap::RegisterNativeAllocation(int bytes) {
// Total number of native bytes allocated.
- native_bytes_allocated_ += bytes;
+ native_bytes_allocated_.fetch_add(bytes);
Thread* self = Thread::Current();
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
// The second watermark is higher than the gc watermark. If you hit this it means you are
@@ -2027,14 +2027,14 @@ void Heap::RegisterNativeAllocation(int bytes) {
void Heap::RegisterNativeFree(int bytes) {
int expected_size, new_size;
do {
- expected_size = native_bytes_allocated_.get();
+ expected_size = native_bytes_allocated_.load();
new_size = expected_size - bytes;
if (new_size < 0) {
ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated",
bytes, expected_size);
break;
}
- } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+ } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}
} // namespace gc
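The RegisterNativeFree() loop above is the standard CAS retry idiom: read the current value, compute the update, and retry if another thread raced in between. With a genuine std::atomic the loop can be written slightly more tightly, because compare_exchange_weak reloads the expected value on failure (an illustrative sketch, assuming native_bytes_allocated were a std::atomic<int32_t>):

    int32_t expected = native_bytes_allocated.load();
    int32_t new_size;
    do {
      new_size = expected - bytes;
      if (new_size < 0) {
        // Underflow: report the error and bail out, as the real code does.
        break;
      }
      // On failure, compare_exchange_weak stores the current value into
      // 'expected', so no explicit reload is needed at the top of the loop.
    } while (!native_bytes_allocated.compare_exchange_weak(expected, new_size));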
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 436525c..ef07393 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -17,6 +17,7 @@
#ifndef ART_JDWP_JDWP_H_
#define ART_JDWP_JDWP_H_
+#include "atomic_integer.h"
#include "base/mutex.h"
#include "jdwp/jdwp_bits.h"
#include "jdwp/jdwp_constants.h"
@@ -319,9 +320,8 @@ struct JdwpState {
int64_t last_activity_time_ms_;
// Global counters and a mutex to protect them.
- Mutex serial_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- uint32_t request_serial_ GUARDED_BY(serial_lock_);
- uint32_t event_serial_ GUARDED_BY(serial_lock_);
+ AtomicInteger request_serial_;
+ AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
Mutex event_list_lock_;
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index e831af4..8e61d23 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -36,7 +36,7 @@ static void* StartJdwpThread(void* arg);
* JdwpNetStateBase class implementation
*/
JdwpNetStateBase::JdwpNetStateBase(JdwpState* state)
- : state_(state), socket_lock_("JdwpNetStateBase lock", kJdwpSerialSocketLock) {
+ : state_(state), socket_lock_("JdwpNetStateBase lock", kJdwpSocketLock) {
clientSock = -1;
wake_pipe_[0] = -1;
wake_pipe_[1] = -1;
@@ -185,7 +185,6 @@ void JdwpState::SendRequest(ExpandBuf* pReq) {
* packets to the debugger.
*/
uint32_t JdwpState::NextRequestSerial() {
- MutexLock mu(Thread::Current(), serial_lock_);
return request_serial_++;
}
@@ -194,7 +193,6 @@ uint32_t JdwpState::NextRequestSerial() {
* message type EventRequest.Set.
*/
uint32_t JdwpState::NextEventSerial() {
- MutexLock mu(Thread::Current(), serial_lock_);
return event_serial_++;
}
@@ -211,7 +209,6 @@ JdwpState::JdwpState(const JdwpOptions* options)
attach_lock_("JDWP attach lock", kJdwpAttachLock),
attach_cond_("JDWP attach condition variable", attach_lock_),
last_activity_time_ms_(0),
- serial_lock_("JDWP serial lock", kJdwpSerialSocketLock),
request_serial_(0x10000000),
event_serial_(0x20000000),
event_list_lock_("JDWP event list lock", kJdwpEventListLock),
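With serial_lock_ gone, NextRequestSerial() and NextEventSerial() reduce to a single lock-free post-increment: AtomicInteger::operator++(int) returns the old value from android_atomic_inc(), so each caller still receives a unique serial. In std::atomic terms the equivalent would be (illustrative sketch only):

    uint32_t NextRequestSerial() {
      // fetch_add returns the value before the increment,
      // i.e. this caller's unique serial number.
      return request_serial_.fetch_add(1);
    }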
diff --git a/runtime/locks.h b/runtime/locks.h
index 91437e1..8cc39ea 100644
--- a/runtime/locks.h
+++ b/runtime/locks.h
@@ -36,7 +36,7 @@ enum LockLevel {
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
- kJdwpSerialSocketLock,
+ kJdwpSocketLock,
kAllocSpaceLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index e2a32f5..4717ce7 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -95,7 +95,7 @@ TEST_F(ThreadPoolTest, StopStart) {
EXPECT_EQ(0, bad_count);
// Allow tasks to finish up and delete themselves.
thread_pool.StartWorkers(self);
- while (count.get() != num_tasks && bad_count.get() != 1) {
+ while (count.load() != num_tasks && bad_count.load() != 1) {
usleep(200);
}
thread_pool.StopWorkers(self);