summaryrefslogtreecommitdiffstats
path: root/runtime/base
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/base')
-rw-r--r--runtime/base/allocator.cc74
-rw-r--r--runtime/base/allocator.h41
-rw-r--r--runtime/base/bit_vector.cc177
-rw-r--r--runtime/base/bit_vector.h134
-rw-r--r--runtime/base/bit_vector_test.cc122
-rw-r--r--runtime/base/histogram-inl.h29
-rw-r--r--runtime/base/histogram.h6
-rw-r--r--runtime/base/histogram_test.cc32
-rw-r--r--runtime/base/logging.cc27
-rw-r--r--runtime/base/macros.h6
-rw-r--r--runtime/base/mutex-inl.h128
-rw-r--r--runtime/base/mutex.cc171
-rw-r--r--runtime/base/mutex.h2
-rw-r--r--runtime/base/timing_logger.cc109
-rw-r--r--runtime/base/timing_logger.h35
-rw-r--r--runtime/base/timing_logger_test.cc30
-rw-r--r--runtime/base/unix_file/fd_file.cc4
-rw-r--r--runtime/base/unix_file/fd_file.h4
18 files changed, 856 insertions, 275 deletions
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
new file mode 100644
index 0000000..4f7753d
--- /dev/null
+++ b/runtime/base/allocator.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "allocator.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+
+namespace art {
+
+class MallocAllocator : public Allocator {
+ public:
+ explicit MallocAllocator() {}
+ ~MallocAllocator() {}
+
+ virtual void* Alloc(size_t size) {
+ return calloc(sizeof(uint8_t), size);
+ }
+
+ virtual void Free(void* p) {
+ free(p);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MallocAllocator);
+};
+
+MallocAllocator g_malloc_allocator;
+
+class NoopAllocator : public Allocator {
+ public:
+ explicit NoopAllocator() {}
+ ~NoopAllocator() {}
+
+ virtual void* Alloc(size_t size) {
+ LOG(FATAL) << "NoopAllocator::Alloc should not be called";
+ return NULL;
+ }
+
+ virtual void Free(void* p) {
+ // Noop.
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(NoopAllocator);
+};
+
+NoopAllocator g_noop_allocator;
+
+Allocator* Allocator::GetMallocAllocator() {
+ return &g_malloc_allocator;
+}
+
+Allocator* Allocator::GetNoopAllocator() {
+ return &g_noop_allocator;
+}
+
+
+} // namespace art
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
new file mode 100644
index 0000000..917bf0b
--- /dev/null
+++ b/runtime/base/allocator.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ALLOCATOR_H_
+#define ART_RUNTIME_BASE_ALLOCATOR_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+class Allocator {
+ public:
+ static Allocator* GetMallocAllocator();
+ static Allocator* GetNoopAllocator();
+
+ Allocator() {}
+ virtual ~Allocator() {}
+
+ virtual void* Alloc(size_t) = 0;
+ virtual void Free(void*) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Allocator);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_ALLOCATOR_H_
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
new file mode 100644
index 0000000..3b82651
--- /dev/null
+++ b/runtime/base/bit_vector.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_vector.h"
+
+namespace art {
+
+// TODO: profile to make sure this is still a win relative to just using shifted masks.
+static uint32_t check_masks[32] = {
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+ 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+ 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+ 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+ 0x40000000, 0x80000000 };
+
+static inline uint32_t BitsToWords(uint32_t bits) {
+ return (bits + 31) >> 5;
+}
+
+// TODO: replace excessive argument defaulting when we are at gcc 4.7
+// or later on host with delegating constructor support. Specifically,
+// starts_bits and storage_size/storage are mutually exclusive.
+BitVector::BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size,
+ uint32_t* storage)
+ : allocator_(allocator),
+ expandable_(expandable),
+ storage_size_(storage_size),
+ storage_(storage) {
+ DCHECK_EQ(sizeof(storage_[0]), 4U); // Assuming 32-bit units.
+ if (storage_ == NULL) {
+ storage_size_ = BitsToWords(start_bits);
+ storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(uint32_t)));
+ }
+}
+
+BitVector::~BitVector() {
+ allocator_->Free(storage_);
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool BitVector::IsBitSet(uint32_t num) const {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+
+ uint32_t val = storage_[num >> 5] & check_masks[num & 0x1f];
+ return (val != 0);
+}
+
+// Mark all bits as "clear".
+void BitVector::ClearAllBits() {
+ memset(storage_, 0, storage_size_ * sizeof(uint32_t));
+}
+
+// Mark the specified bit as "set".
+/*
+ * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
+ * not using it badly or change resize mechanism.
+ */
+void BitVector::SetBit(uint32_t num) {
+ if (num >= storage_size_ * sizeof(uint32_t) * 8) {
+ DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
+
+ /* Round up to word boundaries for "num+1" bits */
+ uint32_t new_size = BitsToWords(num + 1);
+ DCHECK_GT(new_size, storage_size_);
+ uint32_t *new_storage =
+ static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(uint32_t)));
+ memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
+ // Zero out the new storage words.
+ memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
+ // TODO: collect stats on space wasted because of resize.
+ storage_ = new_storage;
+ storage_size_ = new_size;
+ }
+
+ storage_[num >> 5] |= check_masks[num & 0x1f];
+}
+
+// Mark the specified bit as "unset".
+void BitVector::ClearBit(uint32_t num) {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+ storage_[num >> 5] &= ~check_masks[num & 0x1f];
+}
+
+// Intersect with another bit vector. Sizes and expandability must be the same.
+void BitVector::Intersect(const BitVector* src) {
+ DCHECK_EQ(storage_size_, src->GetStorageSize());
+ DCHECK_EQ(expandable_, src->IsExpandable());
+ for (uint32_t idx = 0; idx < storage_size_; idx++) {
+ storage_[idx] &= src->GetRawStorageWord(idx);
+ }
+}
+
+/*
+ * Union with another bit vector. Sizes and expandability must be the same.
+ */
+void BitVector::Union(const BitVector* src) {
+ DCHECK_EQ(storage_size_, src->GetStorageSize());
+ DCHECK_EQ(expandable_, src->IsExpandable());
+ for (uint32_t idx = 0; idx < storage_size_; idx++) {
+ storage_[idx] |= src->GetRawStorageWord(idx);
+ }
+}
+
+// Count the number of bits that are set.
+uint32_t BitVector::NumSetBits() const {
+ uint32_t count = 0;
+ for (uint32_t word = 0; word < storage_size_; word++) {
+ count += __builtin_popcount(storage_[word]);
+ }
+ return count;
+}
+
+// Count the number of bits that are set up through and including num.
+uint32_t BitVector::NumSetBits(uint32_t num) const {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+ uint32_t last_word = num >> 5;
+ uint32_t partial_word_bits = num & 0x1f;
+
+ // partial_word_bits | # | | | partial_word_mask
+ // 00000 | 0 | 0xffffffff >> (31 - 0) | (1 << (0 + 1)) - 1 | 0x00000001
+ // 00001 | 1 | 0xffffffff >> (31 - 1) | (1 << (1 + 1)) - 1 | 0x00000003
+ // 00010 | 2 | 0xffffffff >> (31 - 2) | (1 << (2 + 1)) - 1 | 0x00000007
+ // ..... |
+ // 11110 | 30 | 0xffffffff >> (31 - 30) | (1 << (30 + 1)) - 1 | 0x7fffffff
+ // 11111 | 31 | 0xffffffff >> (31 - 31) | last_full_word++ | 0xffffffff
+ uint32_t partial_word_mask = 0xffffffff >> (0x1f - partial_word_bits);
+
+ uint32_t count = 0;
+ for (uint32_t word = 0; word < last_word; word++) {
+ count += __builtin_popcount(storage_[word]);
+ }
+ count += __builtin_popcount(storage_[last_word] & partial_word_mask);
+ return count;
+}
+
+BitVector::Iterator* BitVector::GetIterator() const {
+ return new (allocator_) Iterator(this);
+}
+
+/*
+ * Mark specified number of bits as "set". Cannot set all bits like ClearAllBits
+ * since there might be unused bits - setting those to one will confuse the
+ * iterator.
+ */
+void BitVector::SetInitialBits(uint32_t num_bits) {
+ DCHECK_LE(BitsToWords(num_bits), storage_size_);
+ uint32_t idx;
+ for (idx = 0; idx < (num_bits >> 5); idx++) {
+ storage_[idx] = -1;
+ }
+ uint32_t rem_num_bits = num_bits & 0x1f;
+ if (rem_num_bits) {
+ storage_[idx] = (1 << rem_num_bits) - 1;
+ }
+}
+
+} // namespace art
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
new file mode 100644
index 0000000..74bec08
--- /dev/null
+++ b/runtime/base/bit_vector.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include "allocator.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+
+/*
+ * Expanding bitmap, used for tracking resources. Bits are numbered starting
+ * from zero. All operations on a BitVector are unsynchronized.
+ */
+class BitVector {
+ public:
+ class Iterator {
+ public:
+ explicit Iterator(const BitVector* bit_vector)
+ : p_bits_(bit_vector),
+ bit_storage_(bit_vector->GetRawStorage()),
+ bit_index_(0),
+ bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
+
+ // Return the position of the next set bit. -1 means end-of-element reached.
+ int32_t Next() {
+ // Did anything obviously change since we started?
+ DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+ DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+ if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
+
+ uint32_t word_index = bit_index_ / 32;
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word >>= bit_index_ & 0x1f;
+ if (word == 0) {
+ bit_index_ &= ~0x1f;
+ do {
+ word_index++;
+ if (UNLIKELY((word_index * 32) >= bit_size_)) {
+ bit_index_ = bit_size_;
+ return -1;
+ }
+ word = bit_storage_[word_index];
+ bit_index_ += 32;
+ } while (word == 0);
+ }
+ bit_index_ += CTZ(word) + 1;
+ return bit_index_ - 1;
+ }
+
+ static void* operator new(size_t size, Allocator* allocator) {
+ return allocator->Alloc(sizeof(BitVector::Iterator));
+ };
+ static void operator delete(void* p) {
+ Iterator* it = reinterpret_cast<Iterator*>(p);
+ it->p_bits_->allocator_->Free(p);
+ }
+
+ private:
+ const BitVector* const p_bits_;
+ const uint32_t* const bit_storage_;
+ uint32_t bit_index_; // Current index (size in bits).
+ const uint32_t bit_size_; // Size of vector in bits.
+
+ friend class BitVector;
+ };
+
+ BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size = 0,
+ uint32_t* storage = NULL);
+
+ virtual ~BitVector();
+
+ void SetBit(uint32_t num);
+ void ClearBit(uint32_t num);
+ bool IsBitSet(uint32_t num) const;
+ void ClearAllBits();
+ void SetInitialBits(uint32_t num_bits);
+ void Copy(BitVector* src) {
+ memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+ }
+ void Intersect(const BitVector* src2);
+ void Union(const BitVector* src);
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const BitVector* src) {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+ }
+ uint32_t NumSetBits() const;
+ uint32_t NumSetBits(uint32_t num) const;
+
+ Iterator* GetIterator() const;
+
+ uint32_t GetStorageSize() const { return storage_size_; }
+ bool IsExpandable() const { return expandable_; }
+ uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
+ uint32_t* GetRawStorage() { return storage_; }
+ const uint32_t* GetRawStorage() const { return storage_; }
+ size_t GetSizeOf() const { return storage_size_ * sizeof(uint32_t); }
+
+ private:
+ Allocator* const allocator_;
+ const bool expandable_; // expand bitmap if we run out?
+ uint32_t storage_size_; // current size, in 32-bit words.
+ uint32_t* storage_;
+};
+
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_VECTOR_H_
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
new file mode 100644
index 0000000..d99d059
--- /dev/null
+++ b/runtime/base/bit_vector_test.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "UniquePtr.h"
+#include "bit_vector.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(BitVector, Test) {
+ const size_t kBits = 32;
+
+ BitVector bv(kBits, false, Allocator::GetMallocAllocator());
+ EXPECT_EQ(1U, bv.GetStorageSize());
+ EXPECT_EQ(kWordSize, bv.GetSizeOf());
+ EXPECT_FALSE(bv.IsExpandable());
+
+ EXPECT_EQ(0U, bv.NumSetBits());
+ EXPECT_EQ(0U, bv.NumSetBits(0));
+ EXPECT_EQ(0U, bv.NumSetBits(kBits - 1));
+ for (size_t i = 0; i < kBits; i++) {
+ EXPECT_FALSE(bv.IsBitSet(i));
+ }
+ EXPECT_EQ(0U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0U, *bv.GetRawStorage());
+
+ BitVector::Iterator empty_iterator(&bv);
+ EXPECT_EQ(-1, empty_iterator.Next());
+
+ UniquePtr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator());
+ EXPECT_EQ(-1, empty_iterator_on_heap->Next());
+
+ bv.SetBit(0);
+ bv.SetBit(kBits - 1);
+ EXPECT_EQ(2U, bv.NumSetBits());
+ EXPECT_EQ(1U, bv.NumSetBits(0));
+ EXPECT_EQ(2U, bv.NumSetBits(kBits - 1));
+ EXPECT_TRUE(bv.IsBitSet(0));
+ for (size_t i = 1; i < kBits - 1; i++) {
+ EXPECT_FALSE(bv.IsBitSet(i));
+ }
+ EXPECT_TRUE(bv.IsBitSet(kBits - 1));
+ EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x80000001U, *bv.GetRawStorage());
+
+ BitVector::Iterator iterator(&bv);
+ EXPECT_EQ(0, iterator.Next());
+ EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next());
+ EXPECT_EQ(-1, iterator.Next());
+}
+
+TEST(BitVector, NoopAllocator) {
+ const uint32_t kWords = 2;
+
+ uint32_t bits[kWords];
+ memset(bits, 0, sizeof(bits));
+
+ BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits);
+ EXPECT_EQ(kWords, bv.GetStorageSize());
+ EXPECT_EQ(kWords * kWordSize, bv.GetSizeOf());
+ EXPECT_EQ(bits, bv.GetRawStorage());
+ EXPECT_EQ(0U, bv.NumSetBits());
+
+ bv.SetBit(8);
+ EXPECT_EQ(1U, bv.NumSetBits());
+ EXPECT_EQ(0x00000100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(1U, bv.NumSetBits());
+
+ bv.SetBit(16);
+ EXPECT_EQ(2U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(2U, bv.NumSetBits());
+
+ bv.SetBit(32);
+ EXPECT_EQ(3U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000001U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(3U, bv.NumSetBits());
+
+ bv.SetBit(48);
+ EXPECT_EQ(4U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00010001U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(4U, bv.NumSetBits());
+
+ EXPECT_EQ(0U, bv.NumSetBits(0));
+
+ EXPECT_EQ(0U, bv.NumSetBits(7));
+ EXPECT_EQ(1U, bv.NumSetBits(8));
+ EXPECT_EQ(1U, bv.NumSetBits(9));
+
+ EXPECT_EQ(1U, bv.NumSetBits(15));
+ EXPECT_EQ(2U, bv.NumSetBits(16));
+ EXPECT_EQ(2U, bv.NumSetBits(17));
+
+ EXPECT_EQ(2U, bv.NumSetBits(31));
+ EXPECT_EQ(3U, bv.NumSetBits(32));
+ EXPECT_EQ(3U, bv.NumSetBits(33));
+
+ EXPECT_EQ(3U, bv.NumSetBits(47));
+ EXPECT_EQ(4U, bv.NumSetBits(48));
+ EXPECT_EQ(4U, bv.NumSetBits(49));
+
+ EXPECT_EQ(4U, bv.NumSetBits(63));
+}
+
+} // namespace art
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 0345266..7c09999 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -39,6 +39,13 @@ template <class Value> inline void Histogram<Value>::AddValue(Value value) {
BucketiseValue(value);
}
+template <class Value> inline Histogram<Value>::Histogram(const char* name)
+ : kAdjust(0),
+ kInitialBucketCount(0),
+ name_(name),
+ max_buckets_(0) {
+}
+
template <class Value>
inline Histogram<Value>::Histogram(const char* name, Value initial_bucket_width,
size_t max_buckets)
@@ -162,28 +169,30 @@ inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double
double per_0 = (1.0 - interval) / 2.0;
double per_1 = per_0 + interval;
- os << Name() << ":\t";
TimeUnit unit = GetAppropriateTimeUnit(Mean() * kAdjust);
+ os << Name() << ":\tSum: ";
+ os << PrettyDuration(Sum() * kAdjust) << " ";
os << (interval * 100) << "% C.I. " << FormatDuration(Percentile(per_0, data) * kAdjust, unit);
os << "-" << FormatDuration(Percentile(per_1, data) * kAdjust, unit) << " ";
os << "Avg: " << FormatDuration(Mean() * kAdjust, unit) << " Max: ";
os << FormatDuration(Max() * kAdjust, unit) << "\n";
}
-template <class Value> inline void Histogram<Value>::CreateHistogram(CumulativeData& out_data) {
+template <class Value>
+inline void Histogram<Value>::CreateHistogram(CumulativeData* out_data) const {
DCHECK_GT(sample_size_, 0ull);
- out_data.freq_.clear();
- out_data.perc_.clear();
+ out_data->freq_.clear();
+ out_data->perc_.clear();
uint64_t accumulated = 0;
- out_data.freq_.push_back(accumulated);
- out_data.perc_.push_back(0.0);
+ out_data->freq_.push_back(accumulated);
+ out_data->perc_.push_back(0.0);
for (size_t idx = 0; idx < frequency_.size(); idx++) {
accumulated += frequency_[idx];
- out_data.freq_.push_back(accumulated);
- out_data.perc_.push_back(static_cast<double>(accumulated) / static_cast<double>(sample_size_));
+ out_data->freq_.push_back(accumulated);
+ out_data->perc_.push_back(static_cast<double>(accumulated) / static_cast<double>(sample_size_));
}
- DCHECK_EQ(out_data.freq_.back(), sample_size_);
- DCHECK_LE(std::abs(out_data.perc_.back() - 1.0), 0.001);
+ DCHECK_EQ(out_data->freq_.back(), sample_size_);
+ DCHECK_LE(std::abs(out_data->perc_.back() - 1.0), 0.001);
}
template <class Value>
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 2a02cf4..a7d51e2 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -40,6 +40,10 @@ template <class Value> class Histogram {
std::vector<double> perc_;
};
+ // Used by the cumulative timing logger to search the histogram set for an existing split
+ // with the same name using CumulativeLogger::HistogramComparator.
+ explicit Histogram(const char* name);
+ // This is the expected constructor when creating new Histograms.
Histogram(const char* name, Value initial_bucket_width, size_t max_buckets = 100);
void AddValue(Value);
// Builds the cumulative distribution function from the frequency data.
@@ -47,7 +51,7 @@ template <class Value> class Histogram {
// cumulative_freq[i] = sum(frequency[j] : 0 < j < i )
// Accumulative summation of percentiles; which is the frequency / SampleSize
// cumulative_perc[i] = sum(frequency[j] / SampleSize : 0 < j < i )
- void CreateHistogram(CumulativeData& data);
+ void CreateHistogram(CumulativeData* data) const;
// Reset the cumulative values, next time CreateHistogram is called it will recreate the cache.
void Reset();
double Mean() const;
diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc
index 534440c..966b97f 100644
--- a/runtime/base/histogram_test.cc
+++ b/runtime/base/histogram_test.cc
@@ -85,7 +85,7 @@ TEST(Histtest, Percentile) {
hist->AddValue(145);
hist->AddValue(155);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
EXPECT_EQ(875, static_cast<int>(PerValue * 10));
}
@@ -117,12 +117,12 @@ TEST(Histtest, UpdateRange) {
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::string text;
std::stringstream stream;
- std::string expected("UpdateRange:\t99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
+ std::string expected("UpdateRange:\tSum: 2.654ms 99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
hist->PrintConfidenceIntervals(stream, 0.99, data);
EXPECT_EQ(expected, stream.str());
@@ -132,7 +132,6 @@ TEST(Histtest, UpdateRange) {
TEST(Histtest, Reset) {
UniquePtr<Histogram<uint64_t> > hist(new Histogram<uint64_t>("Reset", 5));
- Histogram<uint64_t>::CumulativeData data;
double PerValue;
hist->AddValue(0);
@@ -160,12 +159,13 @@ TEST(Histtest, Reset) {
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ Histogram<uint64_t>::CumulativeData data;
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::string text;
std::stringstream stream;
- std::string expected("Reset:\t99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
+ std::string expected("Reset:\tSum: 2.654ms 99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
hist->PrintConfidenceIntervals(stream, 0.99, data);
EXPECT_EQ(expected, stream.str());
@@ -185,7 +185,7 @@ TEST(Histtest, MultipleCreateHist) {
hist->AddValue(68);
hist->AddValue(75);
hist->AddValue(93);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
hist->AddValue(110);
hist->AddValue(121);
hist->AddValue(132);
@@ -194,17 +194,17 @@ TEST(Histtest, MultipleCreateHist) {
hist->AddValue(155);
hist->AddValue(163);
hist->AddValue(168);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
hist->AddValue(175);
hist->AddValue(182);
hist->AddValue(193);
hist->AddValue(200);
hist->AddValue(205);
hist->AddValue(212);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
PerValue = hist->Percentile(0.50, data);
std::stringstream stream;
- std::string expected("MultipleCreateHist:\t99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
+ std::string expected("MultipleCreateHist:\tSum: 2.654ms 99% C.I. 15us-212us Avg: 126.380us Max: 212us\n");
hist->PrintConfidenceIntervals(stream, 0.99, data);
EXPECT_EQ(expected, stream.str());
@@ -217,9 +217,9 @@ TEST(Histtest, SingleValue) {
Histogram<uint64_t>::CumulativeData data;
hist->AddValue(1);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
std::stringstream stream;
- std::string expected = "SingleValue:\t99% C.I. 1us-1us Avg: 1us Max: 1us\n";
+ std::string expected = "SingleValue:\tSum: 1us 99% C.I. 1us-1us Avg: 1us Max: 1us\n";
hist->PrintConfidenceIntervals(stream, 0.99, data);
EXPECT_EQ(expected, stream.str());
}
@@ -234,7 +234,7 @@ TEST(Histtest, CappingPercentiles) {
for (uint64_t idx = 0ull; idx < 150ull; idx++) {
hist->AddValue(0);
}
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
per_995 = hist->Percentile(0.995, data);
EXPECT_EQ(per_995, 0);
hist->Reset();
@@ -243,7 +243,7 @@ TEST(Histtest, CappingPercentiles) {
hist->AddValue(val);
}
}
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
per_005 = hist->Percentile(0.005, data);
per_995 = hist->Percentile(0.995, data);
EXPECT_EQ(1, per_005);
@@ -260,9 +260,9 @@ TEST(Histtest, SpikyValues) {
}
}
hist->AddValue(10000);
- hist->CreateHistogram(data);
+ hist->CreateHistogram(&data);
std::stringstream stream;
- std::string expected = "SpikyValues:\t99% C.I. 0.089us-2541.825us Avg: 95.033us Max: 10000us\n";
+ std::string expected = "SpikyValues:\tSum: 14.350ms 99% C.I. 0.089us-2541.825us Avg: 95.033us Max: 10000us\n";
hist->PrintConfidenceIntervals(stream, 0.99, data);
EXPECT_EQ(expected, stream.str());
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 7d54baf..3aabc8d 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -18,7 +18,8 @@
#include "base/mutex.h"
#include "runtime.h"
-#include "thread.h"
+#include "thread-inl.h"
+#include "UniquePtr.h"
#include "utils.h"
namespace art {
@@ -28,20 +29,21 @@ LogVerbosity gLogVerbosity;
unsigned int gAborting = 0;
static LogSeverity gMinimumLogSeverity = INFO;
-static std::string* gCmdLine = NULL;
-static std::string* gProgramInvocationName = NULL;
-static std::string* gProgramInvocationShortName = NULL;
+static UniquePtr<std::string> gCmdLine;
+static UniquePtr<std::string> gProgramInvocationName;
+static UniquePtr<std::string> gProgramInvocationShortName;
const char* GetCmdLine() {
- return (gCmdLine != NULL) ? gCmdLine->c_str() : NULL;
+ return (gCmdLine.get() != nullptr) ? gCmdLine->c_str() : nullptr;
}
const char* ProgramInvocationName() {
- return (gProgramInvocationName != NULL) ? gProgramInvocationName->c_str() : "art";
+ return (gProgramInvocationName.get() != nullptr) ? gProgramInvocationName->c_str() : "art";
}
const char* ProgramInvocationShortName() {
- return (gProgramInvocationShortName != NULL) ? gProgramInvocationShortName->c_str() : "art";
+ return (gProgramInvocationShortName.get() != nullptr) ? gProgramInvocationShortName->c_str()
+ : "art";
}
// Configure logging based on ANDROID_LOG_TAGS environment variable.
@@ -53,7 +55,7 @@ const char* ProgramInvocationShortName() {
// and a letter indicating the minimum priority level we're expected to log.
// This can be used to reveal or conceal logs with specific tags.
void InitLogging(char* argv[]) {
- if (gCmdLine != NULL) {
+ if (gCmdLine.get() != nullptr) {
return;
}
// TODO: Move this to a more obvious InitART...
@@ -63,17 +65,18 @@ void InitLogging(char* argv[]) {
// but we don't have that luxury on the Mac, and there are a couple of argv[0] variants that are
// commonly used.
if (argv != NULL) {
- gCmdLine = new std::string(argv[0]);
+ gCmdLine.reset(new std::string(argv[0]));
for (size_t i = 1; argv[i] != NULL; ++i) {
gCmdLine->append(" ");
gCmdLine->append(argv[i]);
}
- gProgramInvocationName = new std::string(argv[0]);
+ gProgramInvocationName.reset(new std::string(argv[0]));
const char* last_slash = strrchr(argv[0], '/');
- gProgramInvocationShortName = new std::string((last_slash != NULL) ? last_slash + 1 : argv[0]);
+ gProgramInvocationShortName.reset(new std::string((last_slash != NULL) ? last_slash + 1
+ : argv[0]));
} else {
// TODO: fall back to /proc/self/cmdline when argv is NULL on Linux
- gCmdLine = new std::string("<unset>");
+ gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
if (tags == NULL) {
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 6531858..00a530a 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -130,6 +130,10 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define LIKELY(x) __builtin_expect((x), true)
#define UNLIKELY(x) __builtin_expect((x), false)
+// Stringify the argument.
+#define QUOTE(x) #x
+#define STRINGIFY(x) QUOTE(x)
+
#ifndef NDEBUG
#define ALWAYS_INLINE
#else
@@ -138,8 +142,10 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#if defined (__APPLE__)
#define HOT_ATTR
+#define COLD_ATTR
#else
#define HOT_ATTR __attribute__ ((hot))
+#define COLD_ATTR __attribute__ ((cold))
#endif
#define PURE __attribute__ ((__pure__))
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 7e8365e..29b3981 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -41,6 +41,54 @@ static inline int futex(volatile int *uaddr, int op, int val, const struct times
}
#endif // ART_USE_FUTEXES
+#if defined(__APPLE__)
+
+// This works on Mac OS 10.6 but hasn't been tested on older releases.
+struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ int padding1;
+ uint32_t padding2;
+ int16_t padding3;
+ int16_t padding4;
+ uint32_t padding5;
+ pthread_t darwin_pthread_mutex_owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
+ long padding0; // NOLINT(runtime/int) exact match to darwin type
+ pthread_mutex_t padding1;
+ int padding2;
+ pthread_cond_t padding3;
+ pthread_cond_t padding4;
+ int padding5;
+ int padding6;
+ pthread_t darwin_pthread_rwlock_owner;
+ // ...other stuff we don't care about.
+};
+
+#endif // __APPLE__
+
+#if defined(__GLIBC__)
+
+struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
+ int32_t padding0[2];
+ int owner;
+ // ...other stuff we don't care about.
+};
+
+struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
+#ifdef __LP64__
+ int32_t padding0[6];
+#else
+ int32_t padding0[7];
+#endif
+ int writer;
+ // ...other stuff we don't care about.
+};
+
+#endif // __GLIBC__
+
class ScopedContentionRecorder {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
@@ -82,7 +130,7 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
// TODO: tighten this check.
if (kDebugLocking) {
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown() ||
+ CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
}
@@ -185,6 +233,84 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
#endif
}
+inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+ // Sanity debug check that if we think it is locked we have it in our held mutexes.
+ if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t Mutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ return exclusive_owner_;
+#elif defined(__BIONIC__)
+ return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
+#elif defined(__APPLE__)
+ const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
+ pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
+ // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
+ // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
+ if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+}
+
+inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
+ DCHECK(self == NULL || self == Thread::Current());
+ bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
+ if (kDebugLocking) {
+ // Sanity that if the pthread thinks we own the lock the Thread agrees.
+ if (self != NULL && result) {
+ CHECK_EQ(self->GetHeldMutex(level_), this);
+ }
+ }
+ return result;
+}
+
+inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
+#if ART_USE_FUTEXES
+ int32_t state = state_;
+ if (state == 0) {
+ return 0; // No owner.
+ } else if (state > 0) {
+ return -1; // Shared.
+ } else {
+ return exclusive_owner_;
+ }
+#else
+#if defined(__BIONIC__)
+ return rwlock_.writerThreadId;
+#elif defined(__GLIBC__)
+ return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
+#elif defined(__APPLE__)
+ const darwin_pthread_rwlock_t*
+ dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
+ pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
+ if (owner == (pthread_t)0) {
+ return 0;
+ }
+ uint64_t tid;
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
+ return tid;
+#else
+#error unsupported C library
+#endif
+#endif
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_INL_H_
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b99e7c9..ec79c55 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -31,54 +31,6 @@
namespace art {
-#if defined(__APPLE__)
-
-// This works on Mac OS 10.6 but hasn't been tested on older releases.
-struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- int padding1;
- uint32_t padding2;
- int16_t padding3;
- int16_t padding4;
- uint32_t padding5;
- pthread_t darwin_pthread_mutex_owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
- long padding0; // NOLINT(runtime/int) exact match to darwin type
- pthread_mutex_t padding1;
- int padding2;
- pthread_cond_t padding3;
- pthread_cond_t padding4;
- int padding5;
- int padding6;
- pthread_t darwin_pthread_rwlock_owner;
- // ...other stuff we don't care about.
-};
-
-#endif // __APPLE__
-
-#if defined(__GLIBC__)
-
-struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
- int32_t padding0[2];
- int owner;
- // ...other stuff we don't care about.
-};
-
-struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
-#ifdef __LP64__
- int32_t padding0[6];
-#else
- int32_t padding0[7];
-#endif
- int writer;
- // ...other stuff we don't care about.
-};
-
-#endif // __GLIBC__
-
#if ART_USE_FUTEXES
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds.
@@ -102,17 +54,17 @@ struct AllMutexData {
std::set<BaseMutex*>* all_mutexes;
AllMutexData() : all_mutexes(NULL) {}
};
-static struct AllMutexData all_mutex_data[kAllMutexDataSize];
+static struct AllMutexData gAllMutexData[kAllMutexDataSize];
class ScopedAllMutexesLock {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
- while (!all_mutex_data->all_mutexes_guard.compare_and_swap(0, reinterpret_cast<int32_t>(mutex))) {
+ while (!gAllMutexData->all_mutexes_guard.compare_and_swap(0, reinterpret_cast<int32_t>(mutex))) {
NanoSleep(100);
}
}
~ScopedAllMutexesLock() {
- while (!all_mutex_data->all_mutexes_guard.compare_and_swap(reinterpret_cast<int32_t>(mutex_), 0)) {
+ while (!gAllMutexData->all_mutexes_guard.compare_and_swap(reinterpret_cast<int32_t>(mutex_), 0)) {
NanoSleep(100);
}
}
@@ -123,7 +75,7 @@ class ScopedAllMutexesLock {
BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
- std::set<BaseMutex*>** all_mutexes_ptr = &all_mutex_data->all_mutexes;
+ std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
if (*all_mutexes_ptr == NULL) {
// We leak the global set of all mutexes to avoid ordering issues in global variable
// construction/destruction.
@@ -136,7 +88,7 @@ BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(n
BaseMutex::~BaseMutex() {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
- all_mutex_data->all_mutexes->erase(this);
+ gAllMutexData->all_mutexes->erase(this);
}
}
@@ -144,13 +96,13 @@ void BaseMutex::DumpAll(std::ostream& os) {
if (kLogLockContentions) {
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
- std::set<BaseMutex*>* all_mutexes = all_mutex_data->all_mutexes;
+ std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
if (all_mutexes == NULL) {
 // No mutexes have been created yet at startup.
return;
}
typedef std::set<BaseMutex*>::const_iterator It;
- os << "(Contented)\n";
+ os << "(Contended)\n";
for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
BaseMutex* mutex = *it;
if (mutex->HasEverContended()) {
@@ -175,7 +127,8 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
return;
}
if (kDebugLocking) {
- CHECK(self->GetHeldMutex(level_) == this) << "Waiting on unacquired mutex: " << name_;
+ CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
+ << "Waiting on unacquired mutex: " << name_;
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
if (i != level_) {
@@ -313,9 +266,8 @@ Mutex::Mutex(const char* name, LockLevel level, bool recursive)
Mutex::~Mutex() {
#if ART_USE_FUTEXES
if (state_ != 0) {
- MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
} else {
CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
@@ -330,7 +282,7 @@ Mutex::~Mutex() {
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
}
#endif
@@ -346,7 +298,7 @@ void Mutex::ExclusiveLock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to 1.
done = android_atomic_acquire_cas(0, 1, &state_) == 0;
} else {
@@ -432,14 +384,14 @@ void Mutex::ExclusiveUnlock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 1) {
+ if (LIKELY(cur_state == 1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state to 0.
done = android_atomic_release_cas(cur_state, 0, &state_) == 0;
- if (done) { // Spurious fail?
+ if (LIKELY(done)) { // Spurious fail?
// Wake a contender
- if (num_contenders_ > 0) {
+ if (UNLIKELY(num_contenders_ > 0)) {
futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
}
}
@@ -461,41 +413,6 @@ void Mutex::ExclusiveUnlock(Thread* self) {
}
}
-bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
-uint64_t Mutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- return exclusive_owner_;
-#elif defined(__BIONIC__)
- return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
-#elif defined(__APPLE__)
- const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
- pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
- // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
- // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
- if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-}
-
void Mutex::Dump(std::ostream& os) const {
os << (recursive_ ? "recursive " : "non-recursive ")
<< name_
@@ -536,7 +453,7 @@ ReaderWriterMutex::~ReaderWriterMutex() {
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = runtime == NULL || runtime->IsShuttingDown();
+ bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
}
#endif
@@ -549,7 +466,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == 0) {
+ if (LIKELY(cur_state == 0)) {
// Change state from 0 to -1.
done = android_atomic_acquire_cas(0, -1, &state_) == 0;
} else {
@@ -583,14 +500,14 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
bool done = false;
do {
int32_t cur_state = state_;
- if (cur_state == -1) {
+ if (LIKELY(cur_state == -1)) {
// We're no longer the owner.
exclusive_owner_ = 0;
// Change state from -1 to 0.
done = android_atomic_release_cas(-1, 0, &state_) == 0;
- if (done) { // cmpxchg may fail due to noise?
+ if (LIKELY(done)) { // cmpxchg may fail due to noise?
// Wake any waiters.
- if (num_pending_readers_ > 0 || num_pending_writers_ > 0) {
+ if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
}
}
@@ -687,18 +604,6 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) {
return true;
}
-bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
- bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
- if (kDebugLocking) {
- // Sanity that if the pthread thinks we own the lock the Thread agrees.
- if (self != NULL && result) {
- CHECK_EQ(self->GetHeldMutex(level_), this);
- }
- }
- return result;
-}
-
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
DCHECK(self == NULL || self == Thread::Current());
bool result;
@@ -710,37 +615,6 @@ bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
return result;
}
-uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
-#if ART_USE_FUTEXES
- int32_t state = state_;
- if (state == 0) {
- return 0; // No owner.
- } else if (state > 0) {
- return -1; // Shared.
- } else {
- return exclusive_owner_;
- }
-#else
-#if defined(__BIONIC__)
- return rwlock_.writerThreadId;
-#elif defined(__GLIBC__)
- return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
-#elif defined(__APPLE__)
- const darwin_pthread_rwlock_t*
- dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
- pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
- if (owner == (pthread_t)0) {
- return 0;
- }
- uint64_t tid;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__); // Requires Mac OS 10.6
- return tid;
-#else
-#error unsupported C library
-#endif
-#endif
-}
-
void ReaderWriterMutex::Dump(std::ostream& os) const {
os << name_
<< " level=" << static_cast<int>(level_)
@@ -766,9 +640,8 @@ ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
if (num_waiters_!= 0) {
- MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
<< " called with " << num_waiters_ << " waiters.";
}
@@ -780,7 +653,7 @@ ConditionVariable::~ConditionVariable() {
errno = rc;
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown();
+ bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3e05b10..b894c0a 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,7 +58,7 @@ const bool kLogLockContentions = false;
// futex.
const bool kLogLockContentions = false;
#endif
-const size_t kContentionLogSize = 64;
+const size_t kContentionLogSize = 4;
const size_t kContentionLogDataSize = kLogLockContentions ? 1 : 0;
const size_t kAllMutexDataSize = kLogLockContentions ? 1 : 0;
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index 11dc542..c8dee6d 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -22,7 +22,7 @@
#include "timing_logger.h"
#include "base/logging.h"
-#include "thread.h"
+#include "thread-inl.h"
#include "base/stl_util.h"
#include "base/histogram-inl.h"
@@ -39,7 +39,7 @@ CumulativeLogger::CumulativeLogger(const std::string& name)
}
CumulativeLogger::~CumulativeLogger() {
- STLDeleteValues(&histograms_);
+ STLDeleteElements(&histograms_);
}
void CumulativeLogger::SetName(const std::string& name) {
@@ -57,7 +57,7 @@ void CumulativeLogger::End() {
void CumulativeLogger::Reset() {
MutexLock mu(Thread::Current(), lock_);
iterations_ = 0;
- STLDeleteValues(&histograms_);
+ STLDeleteElements(&histograms_);
}
uint64_t CumulativeLogger::GetTotalNs() const {
@@ -67,60 +67,72 @@ uint64_t CumulativeLogger::GetTotalNs() const {
uint64_t CumulativeLogger::GetTotalTime() const {
MutexLock mu(Thread::Current(), lock_);
uint64_t total = 0;
- for (CumulativeLogger::HistogramsIterator it = histograms_.begin(), end = histograms_.end();
- it != end; ++it) {
- total += it->second->Sum();
+ for (Histogram<uint64_t>* histogram : histograms_) {
+ total += histogram->Sum();
}
return total;
}
-void CumulativeLogger::AddLogger(const base::TimingLogger &logger) {
+void CumulativeLogger::AddLogger(const TimingLogger &logger) {
MutexLock mu(Thread::Current(), lock_);
- const base::TimingLogger::SplitTimings& splits = logger.GetSplits();
- for (base::TimingLogger::SplitTimingsIterator it = splits.begin(), end = splits.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ const TimingLogger::SplitTimings& splits = logger.GetSplits();
+ for (auto it = splits.begin(), end = splits.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
uint64_t split_time = split.first;
const char* split_name = split.second;
AddPair(split_name, split_time);
}
}
+size_t CumulativeLogger::GetIterations() const {
+ MutexLock mu(Thread::Current(), lock_);
+ return iterations_;
+}
+
void CumulativeLogger::Dump(std::ostream &os) {
MutexLock mu(Thread::Current(), lock_);
DumpHistogram(os);
}
-void CumulativeLogger::AddPair(const std::string &label, uint64_t delta_time) {
+void CumulativeLogger::AddPair(const std::string& label, uint64_t delta_time) {
// Convert delta time to microseconds so that we don't overflow our counters.
delta_time /= kAdjust;
- if (histograms_.find(label) == histograms_.end()) {
- // TODO: Shoud this be a defined constant so we we know out of which orifice 16 and 100 were picked?
- const size_t max_buckets = Runtime::Current()->GetHeap()->IsLowMemoryMode() ? 16 : 100;
- // TODO: Should this be a defined constant so we know 50 of WTF?
- histograms_[label] = new Histogram<uint64_t>(label.c_str(), 50, max_buckets);
+ Histogram<uint64_t>* histogram;
+ Histogram<uint64_t> dummy(label.c_str());
+ auto it = histograms_.find(&dummy);
+ if (it == histograms_.end()) {
+ const size_t max_buckets = Runtime::Current()->GetHeap()->IsLowMemoryMode() ?
+ kLowMemoryBucketCount : kDefaultBucketCount;
+ histogram = new Histogram<uint64_t>(label.c_str(), kInitialBucketSize, max_buckets);
+ histograms_.insert(histogram);
+ } else {
+ histogram = *it;
}
- histograms_[label]->AddValue(delta_time);
+ histogram->AddValue(delta_time);
}
+class CompareHistogramByTimeSpentDeclining {
+ public:
+ bool operator()(const Histogram<uint64_t>* a, const Histogram<uint64_t>* b) const {
+ return a->Sum() > b->Sum();
+ }
+};
+
 void CumulativeLogger::DumpHistogram(std::ostream &os) {
 os << "Start Dumping histograms for " << iterations_ << " iterations"
 << " for " << name_ << "\n";
- for (CumulativeLogger::HistogramsIterator it = histograms_.begin(), end = histograms_.end();
- it != end; ++it) {
+ std::set<Histogram<uint64_t>*, CompareHistogramByTimeSpentDeclining>
+ sorted_histograms(histograms_.begin(), histograms_.end());
+ for (Histogram<uint64_t>* histogram : sorted_histograms) {
Histogram<uint64_t>::CumulativeData cumulative_data;
- it->second->CreateHistogram(cumulative_data);
- it->second->PrintConfidenceIntervals(os, 0.99, cumulative_data);
- // Reset cumulative values to save memory. We don't expect DumpHistogram to be called often, so
- // it is not performance critical.
+ // We don't expect DumpHistogram to be called often, so it is not performance critical.
+ histogram->CreateHistogram(&cumulative_data);
+ histogram->PrintConfidenceIntervals(os, 0.99, cumulative_data);
}
os << "Done Dumping histograms \n";
}
-
-namespace base {
-
TimingLogger::TimingLogger(const char* name, bool precise, bool verbose)
: name_(name), precise_(precise), verbose_(verbose), current_split_(NULL) {
}
@@ -131,33 +143,35 @@ void TimingLogger::Reset() {
}
void TimingLogger::StartSplit(const char* new_split_label) {
- DCHECK(new_split_label != NULL) << "Starting split (" << new_split_label << ") with null label.";
- TimingLogger::ScopedSplit* explicit_scoped_split = new TimingLogger::ScopedSplit(new_split_label, this);
+ DCHECK(new_split_label != nullptr) << "Starting split with null label.";
+ TimingLogger::ScopedSplit* explicit_scoped_split =
+ new TimingLogger::ScopedSplit(new_split_label, this);
explicit_scoped_split->explicit_ = true;
}
void TimingLogger::EndSplit() {
- CHECK(current_split_ != NULL) << "Ending a non-existent split.";
- DCHECK(current_split_->label_ != NULL);
- DCHECK(current_split_->explicit_ == true) << "Explicitly ending scoped split: " << current_split_->label_;
-
+ CHECK(current_split_ != nullptr) << "Ending a non-existent split.";
+ DCHECK(current_split_->label_ != nullptr);
+ DCHECK(current_split_->explicit_ == true)
+ << "Explicitly ending scoped split: " << current_split_->label_;
delete current_split_;
+ // TODO: current_split_ = nullptr;
}
// Ends the current split and starts the one given by the label.
void TimingLogger::NewSplit(const char* new_split_label) {
- CHECK(current_split_ != NULL) << "Inserting a new split (" << new_split_label
- << ") into a non-existent split.";
- DCHECK(new_split_label != NULL) << "New split (" << new_split_label << ") with null label.";
-
- current_split_->TailInsertSplit(new_split_label);
+ if (current_split_ == nullptr) {
+ StartSplit(new_split_label);
+ } else {
+ DCHECK(new_split_label != nullptr) << "New split (" << new_split_label << ") with null label.";
+ current_split_->TailInsertSplit(new_split_label);
+ }
}
uint64_t TimingLogger::GetTotalNs() const {
uint64_t total_ns = 0;
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
total_ns += split.first;
}
return total_ns;
@@ -166,9 +180,8 @@ uint64_t TimingLogger::GetTotalNs() const {
void TimingLogger::Dump(std::ostream &os) const {
uint64_t longest_split = 0;
uint64_t total_ns = 0;
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ TimingLogger::SplitTiming split = *it;
uint64_t split_time = split.first;
longest_split = std::max(longest_split, split_time);
total_ns += split_time;
@@ -177,9 +190,8 @@ void TimingLogger::Dump(std::ostream &os) const {
TimeUnit tu = GetAppropriateTimeUnit(longest_split);
uint64_t divisor = GetNsToTimeUnitDivisor(tu);
// Print formatted splits.
- for (base::TimingLogger::SplitTimingsIterator it = splits_.begin(), end = splits_.end();
- it != end; ++it) {
- base::TimingLogger::SplitTiming split = *it;
+ for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+ const TimingLogger::SplitTiming& split = *it;
uint64_t split_time = split.first;
if (!precise_ && divisor >= 1000) {
// Make the fractional part 0.
@@ -226,7 +238,7 @@ TimingLogger::ScopedSplit::~ScopedSplit() {
LOG(INFO) << "End: " << label_ << " " << PrettyDuration(split_time);
}
- // If one or more enclosed explcitly started splits are not terminated we can
+ // If one or more enclosed explicitly started splits are not terminated we can
// either fail or "unwind" the stack of splits in the timing logger to 'this'
// (by deleting the intervening scoped splits). This implements the latter.
TimingLogger::ScopedSplit* current = timing_logger_->current_split_;
@@ -288,5 +300,4 @@ void TimingLogger::ScopedSplit::Resume() {
ATRACE_BEGIN(label_);
}
-} // namespace base
} // namespace art
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 07d1ee0..c1ff0a3 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -21,15 +21,12 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include <set>
#include <string>
#include <vector>
-#include <map>
namespace art {
-
-namespace base {
- class TimingLogger;
-} // namespace base
+class TimingLogger;
class CumulativeLogger {
public:
@@ -44,18 +41,27 @@ class CumulativeLogger {
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
void SetName(const std::string& name);
- void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_);
+ void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
+ size_t GetIterations() const;
private:
- typedef std::map<std::string, Histogram<uint64_t> *> Histograms;
- typedef std::map<std::string, Histogram<uint64_t> *>::const_iterator HistogramsIterator;
+ class HistogramComparator {
+ public:
+ bool operator()(const Histogram<uint64_t>* a, const Histogram<uint64_t>* b) const {
+ return a->Name() < b->Name();
+ }
+ };
+
+ static constexpr size_t kLowMemoryBucketCount = 16;
+ static constexpr size_t kDefaultBucketCount = 100;
+ static constexpr size_t kInitialBucketSize = 50; // 50 microseconds.
void AddPair(const std::string &label, uint64_t delta_time)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_);
uint64_t GetTotalTime() const;
static const uint64_t kAdjust = 1000;
- Histograms histograms_ GUARDED_BY(lock_);
+ std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(lock_);
std::string name_;
const std::string lock_name_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -64,19 +70,17 @@ class CumulativeLogger {
DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
};
-namespace base {
-
-
// A timing logger that knows when a split starts for the purposes of logging tools, like systrace.
class TimingLogger {
public:
// Splits are nanosecond times and split names.
typedef std::pair<uint64_t, const char*> SplitTiming;
typedef std::vector<SplitTiming> SplitTimings;
- typedef std::vector<SplitTiming>::const_iterator SplitTimingsIterator;
explicit TimingLogger(const char* name, bool precise, bool verbose);
-
+ ~TimingLogger() {
+ // TODO: DCHECK(current_split_ == nullptr) << "Forgot to end split: " << current_split_->label_;
+ }
// Clears current splits and labels.
void Reset();
@@ -142,7 +146,7 @@ class TimingLogger {
friend class ScopedSplit;
protected:
// The name of the timing logger.
- const char* name_;
+ const char* const name_;
// Do we want to print the exactly recorded split (true) or round down to the time unit being
// used (false).
@@ -161,7 +165,6 @@ class TimingLogger {
DISALLOW_COPY_AND_ASSIGN(TimingLogger);
};
-} // namespace base
} // namespace art
#endif // ART_RUNTIME_BASE_TIMING_LOGGER_H_
diff --git a/runtime/base/timing_logger_test.cc b/runtime/base/timing_logger_test.cc
index 8f28e48..03cc9cc 100644
--- a/runtime/base/timing_logger_test.cc
+++ b/runtime/base/timing_logger_test.cc
@@ -26,13 +26,13 @@ class TimingLoggerTest : public CommonTest {};
TEST_F(TimingLoggerTest, StartEnd) {
const char* split1name = "First Split";
- base::TimingLogger timings("StartEnd", true, false);
+ TimingLogger timings("StartEnd", true, false);
timings.StartSplit(split1name);
timings.EndSplit(); // Ends split1.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(1U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -43,7 +43,7 @@ TEST_F(TimingLoggerTest, StartNewEnd) {
const char* split1name = "First Split";
const char* split2name = "Second Split";
const char* split3name = "Third Split";
- base::TimingLogger timings("StartNewEnd", true, false);
+ TimingLogger timings("StartNewEnd", true, false);
timings.StartSplit(split1name);
@@ -53,7 +53,7 @@ TEST_F(TimingLoggerTest, StartNewEnd) {
timings.EndSplit(); // Ends split3.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(3U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -67,7 +67,7 @@ TEST_F(TimingLoggerTest, StartNewEndNested) {
const char* split3name = "Third Split";
const char* split4name = "Fourth Split";
const char* split5name = "Fifth Split";
- base::TimingLogger timings("StartNewEndNested", true, false);
+ TimingLogger timings("StartNewEndNested", true, false);
timings.StartSplit(split1name);
@@ -85,7 +85,7 @@ TEST_F(TimingLoggerTest, StartNewEndNested) {
timings.EndSplit(); // Ends split2.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(5U, splits.size());
EXPECT_STREQ(splits[0].second, split1name);
@@ -101,25 +101,25 @@ TEST_F(TimingLoggerTest, Scoped) {
const char* innersplit1 = "Inner Split 1";
const char* innerinnersplit1 = "Inner Inner Split 1";
const char* innersplit2 = "Inner Split 2";
- base::TimingLogger timings("Scoped", true, false);
+ TimingLogger timings("Scoped", true, false);
{
- base::TimingLogger::ScopedSplit outer(outersplit, &timings);
+ TimingLogger::ScopedSplit outer(outersplit, &timings);
{
- base::TimingLogger::ScopedSplit inner1(innersplit1, &timings);
+ TimingLogger::ScopedSplit inner1(innersplit1, &timings);
{
- base::TimingLogger::ScopedSplit innerinner1(innerinnersplit1, &timings);
+ TimingLogger::ScopedSplit innerinner1(innerinnersplit1, &timings);
} // Ends innerinnersplit1.
} // Ends innersplit1.
{
- base::TimingLogger::ScopedSplit inner2(innersplit2, &timings);
+ TimingLogger::ScopedSplit inner2(innersplit2, &timings);
} // Ends innersplit2.
} // Ends outersplit.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(4U, splits.size());
EXPECT_STREQ(splits[0].second, innerinnersplit1);
@@ -134,12 +134,12 @@ TEST_F(TimingLoggerTest, ScopedAndExplicit) {
const char* innersplit = "Inner Split";
const char* innerinnersplit1 = "Inner Inner Split 1";
const char* innerinnersplit2 = "Inner Inner Split 2";
- base::TimingLogger timings("Scoped", true, false);
+ TimingLogger timings("Scoped", true, false);
timings.StartSplit(outersplit);
{
- base::TimingLogger::ScopedSplit inner(innersplit, &timings);
+ TimingLogger::ScopedSplit inner(innersplit, &timings);
timings.StartSplit(innerinnersplit1);
@@ -148,7 +148,7 @@ TEST_F(TimingLoggerTest, ScopedAndExplicit) {
timings.EndSplit(); // Ends outersplit.
- const base::TimingLogger::SplitTimings& splits = timings.GetSplits();
+ const TimingLogger::SplitTimings& splits = timings.GetSplits();
EXPECT_EQ(4U, splits.size());
EXPECT_STREQ(splits[0].second, innerinnersplit1);
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 36f8ba7..f48c76d 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -102,10 +102,6 @@ bool FdFile::IsOpened() const {
return fd_ >= 0;
}
-std::string FdFile::GetPath() const {
- return file_path_;
-}
-
bool FdFile::ReadFully(void* buffer, int64_t byte_count) {
char* ptr = static_cast<char*>(buffer);
while (byte_count > 0) {
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 79a0db9..19e3511 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -57,7 +57,9 @@ class FdFile : public RandomAccessFile {
// Bonus API.
int Fd() const;
bool IsOpened() const;
- std::string GetPath() const;
+ const std::string& GetPath() const {
+ return file_path_;
+ }
void DisableAutoClose();
bool ReadFully(void* buffer, int64_t byte_count);
bool WriteFully(const void* buffer, int64_t byte_count);