author     levin@chromium.org <levin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-03-28 01:54:15 +0000
committer  levin@chromium.org <levin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-03-28 01:54:15 +0000
commit     3b63f8f451afcf414a59c529f627c620e4d449d9 (patch)
tree       2dcbab1c060b29a260c29bb19b67bf97a8293ca3 /base/memory
parent     9174a108509c2aafe513da68e6e63fbc7df38c85 (diff)
Move some files from base to base/memory.
raw_scoped_refptr_mismatch_checker.h
ref_counted.cc
ref_counted.h
ref_counted_memory.cc
ref_counted_memory.h
ref_counted_unittest.cc
scoped_callback_factory.h
scoped_comptr_win.h
scoped_handle.h
scoped_native_library.cc
scoped_native_library.h
scoped_native_library_unittest.cc
scoped_nsobject.h
scoped_open_process.h
scoped_ptr.h
scoped_ptr_unittest.cc
scoped_temp_dir.cc
scoped_temp_dir.h
scoped_temp_dir_unittest.cc
scoped_vector.h
singleton.h
singleton_objc.h
singleton_unittest.cc
linked_ptr.h
linked_ptr_unittest.cc
weak_ptr.cc
weak_ptr.h
weak_ptr_unittest.cc
BUG=None
TEST=Compile
Review URL: http://codereview.chromium.org/6714032
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@79524 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'base/memory')
29 files changed, 3557 insertions, 0 deletions
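Note that this diffstat is limited to base/memory, so caller-side updates elsewhere in the tree are not shown here. For illustration only, a hypothetical caller's include lines would change along these lines after the move (paths taken from the file list above; the actual caller changes are outside this diff):

    // Hypothetical caller, before this change:
    #include "base/ref_counted.h"
    #include "base/scoped_ptr.h"

    // Hypothetical caller, after this change:
    #include "base/memory/ref_counted.h"
    #include "base/memory/scoped_ptr.h"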
diff --git a/base/memory/linked_ptr.h b/base/memory/linked_ptr.h new file mode 100644 index 0000000..41931d8 --- /dev/null +++ b/base/memory/linked_ptr.h @@ -0,0 +1,182 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// A "smart" pointer type with reference tracking. Every pointer to a +// particular object is kept on a circular linked list. When the last pointer +// to an object is destroyed or reassigned, the object is deleted. +// +// Used properly, this deletes the object when the last reference goes away. +// There are several caveats: +// - Like all reference counting schemes, cycles lead to leaks. +// - Each smart pointer is actually two pointers (8 bytes instead of 4). +// - Every time a pointer is released, the entire list of pointers to that +// object is traversed. This class is therefore NOT SUITABLE when there +// will often be more than two or three pointers to a particular object. +// - References are only tracked as long as linked_ptr<> objects are copied. +// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS +// will happen (double deletion). +// +// A good use of this class is storing object references in STL containers. +// You can safely put linked_ptr<> in a vector<>. +// Other uses may not be as good. +// +// Note: If you use an incomplete type with linked_ptr<>, the class +// *containing* linked_ptr<> must have a constructor and destructor (even +// if they do nothing!). +// +// Thread Safety: +// A linked_ptr is NOT thread safe. Copying a linked_ptr object is +// effectively a read-write operation. +// +// Alternative: to linked_ptr is shared_ptr, which +// - is also two pointers in size (8 bytes for 32 bit addresses) +// - is thread safe for copying and deletion +// - supports weak_ptrs + +#ifndef BASE_MEMORY_LINKED_PTR_H_ +#define BASE_MEMORY_LINKED_PTR_H_ +#pragma once + +#include "base/logging.h" // for CHECK macros + +// This is used internally by all instances of linked_ptr<>. It needs to be +// a non-template class because different types of linked_ptr<> can refer to +// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)). +// So, it needs to be possible for different types of linked_ptr to participate +// in the same circular linked list, so we need a single class type here. +// +// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>. +class linked_ptr_internal { + public: + // Create a new circle that includes only this instance. + void join_new() { + next_ = this; + } + + // Join an existing circle. + void join(linked_ptr_internal const* ptr) { + next_ = ptr->next_; + ptr->next_ = this; + } + + // Leave whatever circle we're part of. Returns true iff we were the + // last member of the circle. Once this is done, you can join() another. + bool depart() { + if (next_ == this) return true; + linked_ptr_internal const* p = next_; + while (p->next_ != this) p = p->next_; + p->next_ = next_; + return false; + } + + private: + mutable linked_ptr_internal const* next_; +}; + +template <typename T> +class linked_ptr { + public: + typedef T element_type; + + // Take over ownership of a raw pointer. This should happen as soon as + // possible after the object is created. + explicit linked_ptr(T* ptr = NULL) { capture(ptr); } + ~linked_ptr() { depart(); } + + // Copy an existing linked_ptr<>, adding ourselves to the list of references. 
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); } + + linked_ptr(linked_ptr const& ptr) { + DCHECK_NE(&ptr, this); + copy(&ptr); + } + + // Assignment releases the old value and acquires the new. + template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) { + depart(); + copy(&ptr); + return *this; + } + + linked_ptr& operator=(linked_ptr const& ptr) { + if (&ptr != this) { + depart(); + copy(&ptr); + } + return *this; + } + + // Smart pointer members. + void reset(T* ptr = NULL) { + depart(); + capture(ptr); + } + T* get() const { return value_; } + T* operator->() const { return value_; } + T& operator*() const { return *value_; } + // Release ownership of the pointed object and returns it. + // Sole ownership by this linked_ptr object is required. + T* release() { + bool last = link_.depart(); + CHECK(last); + T* v = value_; + value_ = NULL; + return v; + } + + bool operator==(const T* p) const { return value_ == p; } + bool operator!=(const T* p) const { return value_ != p; } + template <typename U> + bool operator==(linked_ptr<U> const& ptr) const { + return value_ == ptr.get(); + } + template <typename U> + bool operator!=(linked_ptr<U> const& ptr) const { + return value_ != ptr.get(); + } + + private: + template <typename U> + friend class linked_ptr; + + T* value_; + linked_ptr_internal link_; + + void depart() { + if (link_.depart()) delete value_; + } + + void capture(T* ptr) { + value_ = ptr; + link_.join_new(); + } + + template <typename U> void copy(linked_ptr<U> const* ptr) { + value_ = ptr->get(); + if (value_) + link_.join(&ptr->link_); + else + link_.join_new(); + } +}; + +template<typename T> inline +bool operator==(T* ptr, const linked_ptr<T>& x) { + return ptr == x.get(); +} + +template<typename T> inline +bool operator!=(T* ptr, const linked_ptr<T>& x) { + return ptr != x.get(); +} + +// A function to convert T* into linked_ptr<T> +// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation +// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg)) +template <typename T> +linked_ptr<T> make_linked_ptr(T* ptr) { + return linked_ptr<T>(ptr); +} + +#endif // BASE_MEMORY_LINKED_PTR_H_ diff --git a/base/memory/linked_ptr_unittest.cc b/base/memory/linked_ptr_unittest.cc new file mode 100644 index 0000000..ae10fc28 --- /dev/null +++ b/base/memory/linked_ptr_unittest.cc @@ -0,0 +1,108 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include <string> + +#include "base/memory/linked_ptr.h" +#include "base/stringprintf.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +int num = 0; + +std::string history; + +// Class which tracks allocation/deallocation +struct A { + A(): mynum(num++) { history += base::StringPrintf("A%d ctor\n", mynum); } + virtual ~A() { history += base::StringPrintf("A%d dtor\n", mynum); } + virtual void Use() { history += base::StringPrintf("A%d use\n", mynum); } + int mynum; +}; + +// Subclass +struct B: public A { + B() { history += base::StringPrintf("B%d ctor\n", mynum); } + ~B() { history += base::StringPrintf("B%d dtor\n", mynum); } + virtual void Use() { history += base::StringPrintf("B%d use\n", mynum); } +}; + +} // namespace + +TEST(LinkedPtrTest, Test) { + { + linked_ptr<A> a0, a1, a2; + a0 = a0; + a1 = a2; + ASSERT_EQ(a0.get(), static_cast<A*>(NULL)); + ASSERT_EQ(a1.get(), static_cast<A*>(NULL)); + ASSERT_EQ(a2.get(), static_cast<A*>(NULL)); + ASSERT_TRUE(a0 == NULL); + ASSERT_TRUE(a1 == NULL); + ASSERT_TRUE(a2 == NULL); + + { + linked_ptr<A> a3(new A); + a0 = a3; + ASSERT_TRUE(a0 == a3); + ASSERT_TRUE(a0 != NULL); + ASSERT_TRUE(a0.get() == a3); + ASSERT_TRUE(a0 == a3.get()); + linked_ptr<A> a4(a0); + a1 = a4; + linked_ptr<A> a5(new A); + ASSERT_TRUE(a5.get() != a3); + ASSERT_TRUE(a5 != a3.get()); + a2 = a5; + linked_ptr<B> b0(new B); + linked_ptr<A> a6(b0); + ASSERT_TRUE(b0 == a6); + ASSERT_TRUE(a6 == b0); + ASSERT_TRUE(b0 != NULL); + a5 = b0; + a5 = b0; + a3->Use(); + a4->Use(); + a5->Use(); + a6->Use(); + b0->Use(); + (*b0).Use(); + b0.get()->Use(); + } + + a0->Use(); + a1->Use(); + a2->Use(); + + a1 = a2; + a2.reset(new A); + a0.reset(); + + linked_ptr<A> a7; + } + + ASSERT_EQ(history, + "A0 ctor\n" + "A1 ctor\n" + "A2 ctor\n" + "B2 ctor\n" + "A0 use\n" + "A0 use\n" + "B2 use\n" + "B2 use\n" + "B2 use\n" + "B2 use\n" + "B2 use\n" + "B2 dtor\n" + "A2 dtor\n" + "A0 use\n" + "A0 use\n" + "A1 use\n" + "A3 ctor\n" + "A0 dtor\n" + "A3 dtor\n" + "A1 dtor\n" + ); +} diff --git a/base/memory/memory_debug.cc b/base/memory/memory_debug.cc new file mode 100644 index 0000000..f020b94 --- /dev/null +++ b/base/memory/memory_debug.cc @@ -0,0 +1,54 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/memory_debug.h" + +#ifdef PURIFY +// this #define is used to prevent people from directly using pure.h +// instead of memory_debug.h +#define PURIFY_PRIVATE_INCLUDE +#include "base/third_party/purify/pure.h" +#endif + +namespace base { + +bool MemoryDebug::memory_in_use_ = false; + +void MemoryDebug::SetMemoryInUseEnabled(bool enabled) { + memory_in_use_ = enabled; +} + +void MemoryDebug::DumpAllMemoryInUse() { +#ifdef PURIFY + if (memory_in_use_) + PurifyAllInuse(); +#endif +} + +void MemoryDebug::DumpNewMemoryInUse() { +#ifdef PURIFY + if (memory_in_use_) + PurifyNewInuse(); +#endif +} + +void MemoryDebug::DumpAllLeaks() { +#ifdef PURIFY + PurifyAllLeaks(); +#endif +} + +void MemoryDebug::DumpNewLeaks() { +#ifdef PURIFY + PurifyNewLeaks(); +#endif +} + +void MemoryDebug::MarkAsInitialized(void* addr, size_t size) { +#ifdef PURIFY + PurifyMarkAsInitialized(addr, size); +#endif +} + +} // namespace base diff --git a/base/memory/memory_debug.h b/base/memory/memory_debug.h new file mode 100644 index 0000000..9cc6c61 --- /dev/null +++ b/base/memory/memory_debug.h @@ -0,0 +1,47 @@ +// Copyright (c) 2011 The Chromium Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Functions used to debug memory usage, leaks, and other memory issues. +// All methods are effectively no-ops unless this program is being run through +// a supported memory tool (currently, only Purify) + +#ifndef BASE_MEMORY_MEMORY_DEBUG_H_ +#define BASE_MEMORY_MEMORY_DEBUG_H_ +#pragma once + +#include "base/basictypes.h" + +namespace base { + +class MemoryDebug { + public: + // Since MIU messages are a lot of data, and we don't always want this data, + // we have a global switch. If disabled, *MemoryInUse are no-ops. + static void SetMemoryInUseEnabled(bool enabled); + + // Dump information about all memory in use. + static void DumpAllMemoryInUse(); + // Dump information about new memory in use since the last + // call to DumpAllMemoryInUse() or DumpNewMemoryInUse(). + static void DumpNewMemoryInUse(); + + // Dump information about all current memory leaks. + static void DumpAllLeaks(); + // Dump information about new memory leaks since the last + // call to DumpAllLeaks() or DumpNewLeaks() + static void DumpNewLeaks(); + + // Mark |size| bytes of memory as initialized, so it doesn't produce any UMRs + // or UMCs. + static void MarkAsInitialized(void* addr, size_t size); + + private: + static bool memory_in_use_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryDebug); +}; + +} // namespace base + +#endif // BASE_MEMORY_MEMORY_DEBUG_H_ diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h new file mode 100644 index 0000000..a4a50c3 --- /dev/null +++ b/base/memory/raw_scoped_refptr_mismatch_checker.h @@ -0,0 +1,130 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_ +#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_ +#pragma once + +#include "base/memory/ref_counted.h" +#include "base/template_util.h" +#include "base/tuple.h" +#include "build/build_config.h" + +// It is dangerous to post a task with a T* argument where T is a subtype of +// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the +// object may already have been deleted since it was not held with a +// scoped_refptr. Example: http://crbug.com/27191 +// The following set of traits are designed to generate a compile error +// whenever this antipattern is attempted. + +namespace base { + +// This is a base internal implementation file used by task.h and callback.h. +// Not for public consumption, so we wrap it in namespace internal. +namespace internal { + +template <typename T> +struct NeedsScopedRefptrButGetsRawPtr { +#if defined(OS_WIN) + enum { + value = base::false_type::value + }; +#else + enum { + // Human readable translation: you needed to be a scoped_refptr if you are a + // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase) + // type. 
+ value = (is_pointer<T>::value && + (is_convertible<T, subtle::RefCountedBase*>::value || + is_convertible<T, subtle::RefCountedThreadSafeBase*>::value)) + }; +#endif +}; + +template <typename Params> +struct ParamsUseScopedRefptrCorrectly { + enum { value = 0 }; +}; + +template <> +struct ParamsUseScopedRefptrCorrectly<Tuple0> { + enum { value = 1 }; +}; + +template <typename A> +struct ParamsUseScopedRefptrCorrectly<Tuple1<A> > { + enum { value = !NeedsScopedRefptrButGetsRawPtr<A>::value }; +}; + +template <typename A, typename B> +struct ParamsUseScopedRefptrCorrectly<Tuple2<A, B> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value) }; +}; + +template <typename A, typename B, typename C> +struct ParamsUseScopedRefptrCorrectly<Tuple3<A, B, C> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value) }; +}; + +template <typename A, typename B, typename C, typename D> +struct ParamsUseScopedRefptrCorrectly<Tuple4<A, B, C, D> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value || + NeedsScopedRefptrButGetsRawPtr<D>::value) }; +}; + +template <typename A, typename B, typename C, typename D, typename E> +struct ParamsUseScopedRefptrCorrectly<Tuple5<A, B, C, D, E> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value || + NeedsScopedRefptrButGetsRawPtr<D>::value || + NeedsScopedRefptrButGetsRawPtr<E>::value) }; +}; + +template <typename A, typename B, typename C, typename D, typename E, + typename F> +struct ParamsUseScopedRefptrCorrectly<Tuple6<A, B, C, D, E, F> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value || + NeedsScopedRefptrButGetsRawPtr<D>::value || + NeedsScopedRefptrButGetsRawPtr<E>::value || + NeedsScopedRefptrButGetsRawPtr<F>::value) }; +}; + +template <typename A, typename B, typename C, typename D, typename E, + typename F, typename G> +struct ParamsUseScopedRefptrCorrectly<Tuple7<A, B, C, D, E, F, G> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value || + NeedsScopedRefptrButGetsRawPtr<D>::value || + NeedsScopedRefptrButGetsRawPtr<E>::value || + NeedsScopedRefptrButGetsRawPtr<F>::value || + NeedsScopedRefptrButGetsRawPtr<G>::value) }; +}; + +template <typename A, typename B, typename C, typename D, typename E, + typename F, typename G, typename H> +struct ParamsUseScopedRefptrCorrectly<Tuple8<A, B, C, D, E, F, G, H> > { + enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value || + NeedsScopedRefptrButGetsRawPtr<B>::value || + NeedsScopedRefptrButGetsRawPtr<C>::value || + NeedsScopedRefptrButGetsRawPtr<D>::value || + NeedsScopedRefptrButGetsRawPtr<E>::value || + NeedsScopedRefptrButGetsRawPtr<F>::value || + NeedsScopedRefptrButGetsRawPtr<G>::value || + NeedsScopedRefptrButGetsRawPtr<H>::value) }; +}; + +} // namespace internal + +} // namespace base + +#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_ diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc new file mode 100644 index 0000000..31ad509 --- /dev/null +++ b/base/memory/ref_counted.cc @@ -0,0 +1,95 @@ +// Copyright (c) 2011 
The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/ref_counted.h" + +#include "base/logging.h" +#include "base/threading/thread_collision_warner.h" + +namespace base { + +namespace subtle { + +RefCountedBase::RefCountedBase() + : ref_count_(0) +#ifndef NDEBUG + , in_dtor_(false) +#endif + { +} + +RefCountedBase::~RefCountedBase() { +#ifndef NDEBUG + DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()"; +#endif +} + +void RefCountedBase::AddRef() const { + // TODO(maruel): Add back once it doesn't assert 500 times/sec. + // Current thread books the critical section "AddRelease" without release it. + // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_); +#ifndef NDEBUG + DCHECK(!in_dtor_); +#endif + ++ref_count_; +} + +bool RefCountedBase::Release() const { + // TODO(maruel): Add back once it doesn't assert 500 times/sec. + // Current thread books the critical section "AddRelease" without release it. + // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_); +#ifndef NDEBUG + DCHECK(!in_dtor_); +#endif + if (--ref_count_ == 0) { +#ifndef NDEBUG + in_dtor_ = true; +#endif + return true; + } + return false; +} + +bool RefCountedThreadSafeBase::HasOneRef() const { + return AtomicRefCountIsOne( + &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_); +} + +RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) { +#ifndef NDEBUG + in_dtor_ = false; +#endif +} + +RefCountedThreadSafeBase::~RefCountedThreadSafeBase() { +#ifndef NDEBUG + DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without " + "calling Release()"; +#endif +} + +void RefCountedThreadSafeBase::AddRef() const { +#ifndef NDEBUG + DCHECK(!in_dtor_); +#endif + AtomicRefCountInc(&ref_count_); +} + +bool RefCountedThreadSafeBase::Release() const { +#ifndef NDEBUG + DCHECK(!in_dtor_); + DCHECK(!AtomicRefCountIsZero(&ref_count_)); +#endif + if (!AtomicRefCountDec(&ref_count_)) { +#ifndef NDEBUG + in_dtor_ = true; +#endif + return true; + } + return false; +} + +} // namespace subtle + +} // namespace base diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h new file mode 100644 index 0000000..1207ed4 --- /dev/null +++ b/base/memory/ref_counted.h @@ -0,0 +1,299 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_REF_COUNTED_H_ +#define BASE_MEMORY_REF_COUNTED_H_ +#pragma once + +#include "base/atomic_ref_count.h" +#include "base/base_api.h" +#include "base/threading/thread_collision_warner.h" + +namespace base { + +namespace subtle { + +class BASE_API RefCountedBase { + public: + static bool ImplementsThreadSafeReferenceCounting() { return false; } + + bool HasOneRef() const { return ref_count_ == 1; } + + protected: + RefCountedBase(); + ~RefCountedBase(); + + void AddRef() const; + + // Returns true if the object should self-delete. + bool Release() const; + + private: + mutable int ref_count_; +#ifndef NDEBUG + mutable bool in_dtor_; +#endif + + DFAKE_MUTEX(add_release_); + + DISALLOW_COPY_AND_ASSIGN(RefCountedBase); +}; + +class BASE_API RefCountedThreadSafeBase { + public: + static bool ImplementsThreadSafeReferenceCounting() { return true; } + + bool HasOneRef() const; + + protected: + RefCountedThreadSafeBase(); + ~RefCountedThreadSafeBase(); + + void AddRef() const; + + // Returns true if the object should self-delete. 
+ bool Release() const; + + private: + mutable AtomicRefCount ref_count_; +#ifndef NDEBUG + mutable bool in_dtor_; +#endif + + DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase); +}; + +} // namespace subtle + +// +// A base class for reference counted classes. Otherwise, known as a cheap +// knock-off of WebKit's RefCounted<T> class. To use this guy just extend your +// class from it like so: +// +// class MyFoo : public base::RefCounted<MyFoo> { +// ... +// private: +// friend class base::RefCounted<MyFoo>; +// ~MyFoo(); +// }; +// +// You should always make your destructor private, to avoid any code deleting +// the object accidently while there are references to it. +template <class T> +class RefCounted : public subtle::RefCountedBase { + public: + RefCounted() { } + ~RefCounted() { } + + void AddRef() const { + subtle::RefCountedBase::AddRef(); + } + + void Release() const { + if (subtle::RefCountedBase::Release()) { + delete static_cast<const T*>(this); + } + } + + private: + DISALLOW_COPY_AND_ASSIGN(RefCounted<T>); +}; + +// Forward declaration. +template <class T, typename Traits> class RefCountedThreadSafe; + +// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref +// count reaches 0. Overload to delete it on a different thread etc. +template<typename T> +struct DefaultRefCountedThreadSafeTraits { + static void Destruct(const T* x) { + // Delete through RefCountedThreadSafe to make child classes only need to be + // friend with RefCountedThreadSafe instead of this struct, which is an + // implementation detail. + RefCountedThreadSafe<T, + DefaultRefCountedThreadSafeTraits>::DeleteInternal(x); + } +}; + +// +// A thread-safe variant of RefCounted<T> +// +// class MyFoo : public base::RefCountedThreadSafe<MyFoo> { +// ... +// }; +// +// If you're using the default trait, then you should add compile time +// asserts that no one else is deleting your object. i.e. +// private: +// friend class base::RefCountedThreadSafe<MyFoo>; +// ~MyFoo(); +template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> > +class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase { + public: + RefCountedThreadSafe() { } + ~RefCountedThreadSafe() { } + + void AddRef() const { + subtle::RefCountedThreadSafeBase::AddRef(); + } + + void Release() const { + if (subtle::RefCountedThreadSafeBase::Release()) { + Traits::Destruct(static_cast<const T*>(this)); + } + } + + private: + friend struct DefaultRefCountedThreadSafeTraits<T>; + static void DeleteInternal(const T* x) { delete x; } + + DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe); +}; + +// +// A wrapper for some piece of data so we can place other things in +// scoped_refptrs<>. +// +template<typename T> +class RefCountedData : public base::RefCounted< base::RefCountedData<T> > { + public: + RefCountedData() : data() {} + RefCountedData(const T& in_value) : data(in_value) {} + + T data; +}; + +} // namespace base + +// +// A smart pointer class for reference counted objects. Use this class instead +// of calling AddRef and Release manually on a reference counted object to +// avoid common memory leaks caused by forgetting to Release an object +// reference. Sample usage: +// +// class MyFoo : public RefCounted<MyFoo> { +// ... +// }; +// +// void some_function() { +// scoped_refptr<MyFoo> foo = new MyFoo(); +// foo->Method(param); +// // |foo| is released when this function returns +// } +// +// void some_other_function() { +// scoped_refptr<MyFoo> foo = new MyFoo(); +// ... 
+// foo = NULL; // explicitly releases |foo| +// ... +// if (foo) +// foo->Method(param); +// } +// +// The above examples show how scoped_refptr<T> acts like a pointer to T. +// Given two scoped_refptr<T> classes, it is also possible to exchange +// references between the two objects, like so: +// +// { +// scoped_refptr<MyFoo> a = new MyFoo(); +// scoped_refptr<MyFoo> b; +// +// b.swap(a); +// // now, |b| references the MyFoo object, and |a| references NULL. +// } +// +// To make both |a| and |b| in the above example reference the same MyFoo +// object, simply use the assignment operator: +// +// { +// scoped_refptr<MyFoo> a = new MyFoo(); +// scoped_refptr<MyFoo> b; +// +// b = a; +// // now, |a| and |b| each own a reference to the same MyFoo object. +// } +// +template <class T> +class scoped_refptr { + public: + scoped_refptr() : ptr_(NULL) { + } + + scoped_refptr(T* p) : ptr_(p) { + if (ptr_) + ptr_->AddRef(); + } + + scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) { + if (ptr_) + ptr_->AddRef(); + } + + template <typename U> + scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) { + if (ptr_) + ptr_->AddRef(); + } + + ~scoped_refptr() { + if (ptr_) + ptr_->Release(); + } + + T* get() const { return ptr_; } + operator T*() const { return ptr_; } + T* operator->() const { return ptr_; } + + // Release a pointer. + // The return value is the current pointer held by this object. + // If this object holds a NULL pointer, the return value is NULL. + // After this operation, this object will hold a NULL pointer, + // and will not own the object any more. + T* release() { + T* retVal = ptr_; + ptr_ = NULL; + return retVal; + } + + scoped_refptr<T>& operator=(T* p) { + // AddRef first so that self assignment should work + if (p) + p->AddRef(); + if (ptr_ ) + ptr_ ->Release(); + ptr_ = p; + return *this; + } + + scoped_refptr<T>& operator=(const scoped_refptr<T>& r) { + return *this = r.ptr_; + } + + template <typename U> + scoped_refptr<T>& operator=(const scoped_refptr<U>& r) { + return *this = r.get(); + } + + void swap(T** pp) { + T* p = ptr_; + ptr_ = *pp; + *pp = p; + } + + void swap(scoped_refptr<T>& r) { + swap(&r.ptr_); + } + + protected: + T* ptr_; +}; + +// Handy utility for creating a scoped_refptr<T> out of a T* explicitly without +// having to retype all the template arguments +template <typename T> +scoped_refptr<T> make_scoped_refptr(T* t) { + return scoped_refptr<T>(t); +} + +#endif // BASE_MEMORY_REF_COUNTED_H_ diff --git a/base/memory/ref_counted_memory.cc b/base/memory/ref_counted_memory.cc new file mode 100644 index 0000000..aa16031 --- /dev/null +++ b/base/memory/ref_counted_memory.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/memory/ref_counted_memory.h" + +RefCountedMemory::RefCountedMemory() { +} + +RefCountedMemory::~RefCountedMemory() { +} + +const unsigned char* RefCountedStaticMemory::front() const { + return data_; +} + +size_t RefCountedStaticMemory::size() const { + return length_; +} + +RefCountedBytes::RefCountedBytes() { +} + +RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer) + : data(initializer) { +} + +RefCountedBytes* RefCountedBytes::TakeVector( + std::vector<unsigned char>* to_destroy) { + RefCountedBytes* bytes = new RefCountedBytes; + bytes->data.swap(*to_destroy); + return bytes; +} + +const unsigned char* RefCountedBytes::front() const { + // STL will assert if we do front() on an empty vector, but calling code + // expects a NULL. + return size() ? &data.front() : NULL; +} + +size_t RefCountedBytes::size() const { + return data.size(); +} + +RefCountedBytes::~RefCountedBytes() { +} diff --git a/base/memory/ref_counted_memory.h b/base/memory/ref_counted_memory.h new file mode 100644 index 0000000..db15792 --- /dev/null +++ b/base/memory/ref_counted_memory.h @@ -0,0 +1,82 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_ +#define BASE_MEMORY_REF_COUNTED_MEMORY_H_ +#pragma once + +#include <vector> + +#include "base/memory/ref_counted.h" + +// TODO(erg): The contents of this file should be in a namespace. This would +// require touching >100 files in chrome/ though. + +// A generic interface to memory. This object is reference counted because one +// of its two subclasses own the data they carry, and we need to have +// heterogeneous containers of these two types of memory. +class RefCountedMemory : public base::RefCountedThreadSafe<RefCountedMemory> { + public: + // Retrieves a pointer to the beginning of the data we point to. If the data + // is empty, this will return NULL. + virtual const unsigned char* front() const = 0; + + // Size of the memory pointed to. + virtual size_t size() const = 0; + + protected: + friend class base::RefCountedThreadSafe<RefCountedMemory>; + RefCountedMemory(); + virtual ~RefCountedMemory(); +}; + +// An implementation of RefCountedMemory, where the ref counting does not +// matter. +class RefCountedStaticMemory : public RefCountedMemory { + public: + RefCountedStaticMemory() + : data_(NULL), length_(0) {} + RefCountedStaticMemory(const unsigned char* data, size_t length) + : data_(data), length_(length) {} + + // Overriden from RefCountedMemory: + virtual const unsigned char* front() const; + virtual size_t size() const; + + private: + const unsigned char* data_; + size_t length_; + + DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory); +}; + +// An implementation of RefCountedMemory, where we own our the data in a +// vector. +class RefCountedBytes : public RefCountedMemory { + public: + RefCountedBytes(); + + // Constructs a RefCountedBytes object by _copying_ from |initializer|. + RefCountedBytes(const std::vector<unsigned char>& initializer); + + // Constructs a RefCountedBytes object by performing a swap. (To non + // destructively build a RefCountedBytes, use the constructor that takes a + // vector.) 
+ static RefCountedBytes* TakeVector(std::vector<unsigned char>* to_destroy); + + // Overriden from RefCountedMemory: + virtual const unsigned char* front() const; + virtual size_t size() const; + + std::vector<unsigned char> data; + + protected: + friend class base::RefCountedThreadSafe<RefCountedBytes>; + virtual ~RefCountedBytes(); + + private: + DISALLOW_COPY_AND_ASSIGN(RefCountedBytes); +}; + +#endif // BASE_MEMORY_REF_COUNTED_MEMORY_H_ diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc new file mode 100644 index 0000000..dcc292f --- /dev/null +++ b/base/memory/ref_counted_unittest.cc @@ -0,0 +1,36 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/ref_counted.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +class SelfAssign : public base::RefCounted<SelfAssign> { + friend class base::RefCounted<SelfAssign>; + + ~SelfAssign() {} +}; + +class CheckDerivedMemberAccess : public scoped_refptr<SelfAssign> { + public: + CheckDerivedMemberAccess() { + // This shouldn't compile if we don't have access to the member variable. + SelfAssign** pptr = &ptr_; + EXPECT_EQ(*pptr, ptr_); + } +}; + +} // end namespace + +TEST(RefCountedUnitTest, TestSelfAssignment) { + SelfAssign* p = new SelfAssign; + scoped_refptr<SelfAssign> var(p); + var = var; + EXPECT_EQ(var.get(), p); +} + +TEST(RefCountedUnitTest, ScopedRefPtrMemberAccess) { + CheckDerivedMemberAccess check; +} diff --git a/base/memory/scoped_callback_factory.h b/base/memory/scoped_callback_factory.h new file mode 100644 index 0000000..a9c58a0 --- /dev/null +++ b/base/memory/scoped_callback_factory.h @@ -0,0 +1,133 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// ScopedCallbackFactory helps in cases where you wish to allocate a Callback +// (see base/callback.h), but need to prevent any pending callbacks from +// executing when your object gets destroyed. +// +// EXAMPLE: +// +// void GatherDataAsynchronously(Callback1<Data>::Type* callback); +// +// class MyClass { +// public: +// MyClass() : factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { +// } +// +// void Process() { +// GatherDataAsynchronously(factory_.NewCallback(&MyClass::GotData)); +// } +// +// private: +// void GotData(const Data& data) { +// ... +// } +// +// base::ScopedCallbackFactory<MyClass> factory_; +// }; +// +// In the above example, the Process function calls GatherDataAsynchronously to +// kick off some asynchronous processing that upon completion will notify a +// callback. If in the meantime, the MyClass instance is destroyed, when the +// callback runs, it will notice that the MyClass instance is dead, and it will +// avoid calling the GotData method. 
+ +#ifndef BASE_MEMORY_SCOPED_CALLBACK_FACTORY_H_ +#define BASE_MEMORY_SCOPED_CALLBACK_FACTORY_H_ + +#include "base/callback.h" +#include "base/memory/weak_ptr.h" + +namespace base { + +template <class T> +class ScopedCallbackFactory { + public: + explicit ScopedCallbackFactory(T* obj) : weak_factory_(obj) { + } + + typename Callback0::Type* NewCallback( + void (T::*method)()) { + return new CallbackImpl<void (T::*)(), Tuple0 >( + weak_factory_.GetWeakPtr(), method); + } + + template <typename Arg1> + typename Callback1<Arg1>::Type* NewCallback( + void (T::*method)(Arg1)) { + return new CallbackImpl<void (T::*)(Arg1), Tuple1<Arg1> >( + weak_factory_.GetWeakPtr(), method); + } + + template <typename Arg1, typename Arg2> + typename Callback2<Arg1, Arg2>::Type* NewCallback( + void (T::*method)(Arg1, Arg2)) { + return new CallbackImpl<void (T::*)(Arg1, Arg2), Tuple2<Arg1, Arg2> >( + weak_factory_.GetWeakPtr(), method); + } + + template <typename Arg1, typename Arg2, typename Arg3> + typename Callback3<Arg1, Arg2, Arg3>::Type* NewCallback( + void (T::*method)(Arg1, Arg2, Arg3)) { + return new CallbackImpl<void (T::*)(Arg1, Arg2, Arg3), + Tuple3<Arg1, Arg2, Arg3> >( + weak_factory_.GetWeakPtr(), method); + } + + template <typename Arg1, typename Arg2, typename Arg3, typename Arg4> + typename Callback4<Arg1, Arg2, Arg3, Arg4>::Type* NewCallback( + void (T::*method)(Arg1, Arg2, Arg3, Arg4)) { + return new CallbackImpl<void (T::*)(Arg1, Arg2, Arg3, Arg4), + Tuple4<Arg1, Arg2, Arg3, Arg4> >( + weak_factory_.GetWeakPtr(), method); + } + + template <typename Arg1, typename Arg2, typename Arg3, typename Arg4, + typename Arg5> + typename Callback5<Arg1, Arg2, Arg3, Arg4, Arg5>::Type* NewCallback( + void (T::*method)(Arg1, Arg2, Arg3, Arg4, Arg5)) { + return new CallbackImpl<void (T::*)(Arg1, Arg2, Arg3, Arg4, Arg5), + Tuple5<Arg1, Arg2, Arg3, Arg4, Arg5> >( + weak_factory_.GetWeakPtr(), method); + } + + void RevokeAll() { weak_factory_.InvalidateWeakPtrs(); } + bool HasPendingCallbacks() const { return weak_factory_.HasWeakPtrs(); } + + private: + template <typename Method> + class CallbackStorage { + public: + CallbackStorage(const WeakPtr<T>& obj, Method meth) + : obj_(obj), + meth_(meth) { + } + + protected: + WeakPtr<T> obj_; + Method meth_; + }; + + template <typename Method, typename Params> + class CallbackImpl : public CallbackStorage<Method>, + public CallbackRunner<Params> { + public: + CallbackImpl(const WeakPtr<T>& obj, Method meth) + : CallbackStorage<Method>(obj, meth) { + } + virtual void RunWithParams(const Params& params) { + // Use "this->" to force C++ to look inside our templatized base class; + // see Effective C++, 3rd Ed, item 43, p210 for details. + if (!this->obj_) + return; + DispatchToMethod(this->obj_.get(), this->meth_, params); + } + }; + + WeakPtrFactory<T> weak_factory_; +}; + +} // namespace base + +#endif // BASE_MEMORY_SCOPED_CALLBACK_FACTORY_H_ diff --git a/base/memory/scoped_handle.h b/base/memory/scoped_handle.h new file mode 100644 index 0000000..232d83e --- /dev/null +++ b/base/memory/scoped_handle.h @@ -0,0 +1,51 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef BASE_MEMORY_SCOPED_HANDLE_H_ +#define BASE_MEMORY_SCOPED_HANDLE_H_ +#pragma once + +#include <stdio.h> + +#include "base/basictypes.h" + +class ScopedStdioHandle { + public: + ScopedStdioHandle() + : handle_(NULL) { } + + explicit ScopedStdioHandle(FILE* handle) + : handle_(handle) { } + + ~ScopedStdioHandle() { + Close(); + } + + void Close() { + if (handle_) { + fclose(handle_); + handle_ = NULL; + } + } + + FILE* get() const { return handle_; } + + FILE* Take() { + FILE* temp = handle_; + handle_ = NULL; + return temp; + } + + void Set(FILE* newhandle) { + Close(); + handle_ = newhandle; + } + + private: + FILE* handle_; + + DISALLOW_COPY_AND_ASSIGN(ScopedStdioHandle); +}; + +#endif // BASE_MEMORY_SCOPED_HANDLE_H_ diff --git a/base/memory/scoped_native_library.cc b/base/memory/scoped_native_library.cc new file mode 100644 index 0000000..c9aef45 --- /dev/null +++ b/base/memory/scoped_native_library.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/scoped_native_library.h" + +namespace base { + +ScopedNativeLibrary::ScopedNativeLibrary() : library_(NULL) { +} + +ScopedNativeLibrary::ScopedNativeLibrary(NativeLibrary library) + : library_(library) { +} + +ScopedNativeLibrary::ScopedNativeLibrary(const FilePath& library_path) { + library_ = base::LoadNativeLibrary(library_path); +} + +ScopedNativeLibrary::~ScopedNativeLibrary() { + if (library_) + base::UnloadNativeLibrary(library_); +} + +void* ScopedNativeLibrary::GetFunctionPointer( + const char* function_name) const { + if (!library_) + return NULL; + return base::GetFunctionPointerFromNativeLibrary(library_, function_name); +} + +void ScopedNativeLibrary::Reset(NativeLibrary library) { + if (library_) + base::UnloadNativeLibrary(library_); + library_ = library; +} + +NativeLibrary ScopedNativeLibrary::Release() { + NativeLibrary result = library_; + library_ = NULL; + return result; +} + +} // namespace base diff --git a/base/memory/scoped_native_library.h b/base/memory/scoped_native_library.h new file mode 100644 index 0000000..56116b9 --- /dev/null +++ b/base/memory/scoped_native_library.h @@ -0,0 +1,53 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_SCOPED_NATIVE_LIBRARY_H_ +#define BASE_MEMORY_SCOPED_NATIVE_LIBRARY_H_ +#pragma once + +#include "base/base_api.h" +#include "base/native_library.h" + +class FilePath; + +namespace base { + +// A class which encapsulates a base::NativeLibrary object available only in a +// scope. +// This class automatically unloads the loaded library in its destructor. +class BASE_API ScopedNativeLibrary { + public: + // Initializes with a NULL library. + ScopedNativeLibrary(); + + // Takes ownership of the given library handle. + explicit ScopedNativeLibrary(NativeLibrary library); + + // Opens the given library and manages its lifetime. + explicit ScopedNativeLibrary(const FilePath& library_path); + + ~ScopedNativeLibrary(); + + // Returns true if there's a valid library loaded. + bool is_valid() const { return !!library_; } + + void* GetFunctionPointer(const char* function_name) const; + + // Takes ownership of the given library handle. Any existing handle will + // be freed. 
+ void Reset(NativeLibrary library); + + // Returns the native library handle and removes it from this object. The + // caller must manage the lifetime of the handle. + NativeLibrary Release(); + + private: + NativeLibrary library_; + + DISALLOW_COPY_AND_ASSIGN(ScopedNativeLibrary); +}; + +} // namespace base + +#endif // BASE_MEMORY_SCOPED_NATIVE_LIBRARY_H_ diff --git a/base/memory/scoped_native_library_unittest.cc b/base/memory/scoped_native_library_unittest.cc new file mode 100644 index 0000000..0cc60e2 --- /dev/null +++ b/base/memory/scoped_native_library_unittest.cc @@ -0,0 +1,33 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/scoped_native_library.h" +#if defined(OS_WIN) +#include "base/file_path.h" +#endif + +#include "testing/gtest/include/gtest/gtest.h" + +// Tests whether or not a function pointer retrieved via ScopedNativeLibrary +// is available only in a scope. +TEST(ScopedNativeLibrary, Basic) { +#if defined(OS_WIN) + // Get the pointer to DirectDrawCreate() from "ddraw.dll" and verify it + // is valid only in this scope. + // FreeLibrary() doesn't actually unload a DLL until its reference count + // becomes zero, i.e. this function pointer is still valid if the DLL used + // in this test is also used by another part of this executable. + // So, this test uses "ddraw.dll", which is not used by Chrome at all but + // installed on all versions of Windows. + FARPROC test_function; + { + FilePath path(base::GetNativeLibraryName(L"ddraw")); + base::ScopedNativeLibrary library(path); + test_function = reinterpret_cast<FARPROC>( + library.GetFunctionPointer("DirectDrawCreate")); + EXPECT_EQ(0, IsBadCodePtr(test_function)); + } + EXPECT_NE(0, IsBadCodePtr(test_function)); +#endif +} diff --git a/base/memory/scoped_nsobject.h b/base/memory/scoped_nsobject.h new file mode 100644 index 0000000..235ac39 --- /dev/null +++ b/base/memory/scoped_nsobject.h @@ -0,0 +1,167 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_SCOPED_NSOBJECT_H_ +#define BASE_MEMORY_SCOPED_NSOBJECT_H_ +#pragma once + +#import <Foundation/Foundation.h> +#include "base/basictypes.h" +#include "base/compiler_specific.h" + +// scoped_nsobject<> is patterned after scoped_ptr<>, but maintains ownership +// of an NSObject subclass object. Style deviations here are solely for +// compatibility with scoped_ptr<>'s interface, with which everyone is already +// familiar. +// +// When scoped_nsobject<> takes ownership of an object (in the constructor or +// in reset()), it takes over the caller's existing ownership claim. The +// caller must own the object it gives to scoped_nsobject<>, and relinquishes +// an ownership claim to that object. scoped_nsobject<> does not call +// -retain. +// +// scoped_nsobject<> is not to be used for NSAutoreleasePools. For +// NSAutoreleasePools use ScopedNSAutoreleasePool from +// scoped_nsautorelease_pool.h instead. +// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile +// time with a template specialization (see below). 
+template<typename NST> +class scoped_nsobject { + public: + typedef NST* element_type; + + explicit scoped_nsobject(NST* object = nil) + : object_(object) { + } + + ~scoped_nsobject() { + [object_ release]; + } + + void reset(NST* object = nil) { + // We intentionally do not check that object != object_ as the caller must + // already have an ownership claim over whatever it gives to + // scoped_nsobject and ScopedCFTypeRef, whether it's in the constructor or + // in a call to reset(). In either case, it relinquishes that claim and + // the scoper assumes it. + [object_ release]; + object_ = object; + } + + bool operator==(NST* that) const { return object_ == that; } + bool operator!=(NST* that) const { return object_ != that; } + + operator NST*() const { + return object_; + } + + NST* get() const { + return object_; + } + + void swap(scoped_nsobject& that) { + NST* temp = that.object_; + that.object_ = object_; + object_ = temp; + } + + // scoped_nsobject<>::release() is like scoped_ptr<>::release. It is NOT + // a wrapper for [object_ release]. To force a scoped_nsobject<> object to + // call [object_ release], use scoped_nsobject<>::reset(). + NST* release() WARN_UNUSED_RESULT { + NST* temp = object_; + object_ = nil; + return temp; + } + + private: + NST* object_; + + DISALLOW_COPY_AND_ASSIGN(scoped_nsobject); +}; + +// Free functions +template <class C> +void swap(scoped_nsobject<C>& p1, scoped_nsobject<C>& p2) { + p1.swap(p2); +} + +template <class C> +bool operator==(C* p1, const scoped_nsobject<C>& p2) { + return p1 == p2.get(); +} + +template <class C> +bool operator!=(C* p1, const scoped_nsobject<C>& p2) { + return p1 != p2.get(); +} + + +// Specialization to make scoped_nsobject<id> work. +template<> +class scoped_nsobject<id> { + public: + typedef id element_type; + + explicit scoped_nsobject(id object = nil) + : object_(object) { + } + + ~scoped_nsobject() { + [object_ release]; + } + + void reset(id object = nil) { + // We intentionally do not check that object != object_ as the caller must + // already have an ownership claim over whatever it gives to + // scoped_nsobject and ScopedCFTypeRef, whether it's in the constructor or + // in a call to reset(). In either case, it relinquishes that claim and + // the scoper assumes it. + [object_ release]; + object_ = object; + } + + bool operator==(id that) const { return object_ == that; } + bool operator!=(id that) const { return object_ != that; } + + operator id() const { + return object_; + } + + id get() const { + return object_; + } + + void swap(scoped_nsobject& that) { + id temp = that.object_; + that.object_ = object_; + object_ = temp; + } + + // scoped_nsobject<>::release() is like scoped_ptr<>::release. It is NOT + // a wrapper for [object_ release]. To force a scoped_nsobject<> object to + // call [object_ release], use scoped_nsobject<>::reset(). + id release() WARN_UNUSED_RESULT { + id temp = object_; + object_ = nil; + return temp; + } + + private: + id object_; + + DISALLOW_COPY_AND_ASSIGN(scoped_nsobject); +}; + +// Do not use scoped_nsobject for NSAutoreleasePools, use +// ScopedNSAutoreleasePool instead. This is a compile time check. See details +// at top of header. 
+template<> +class scoped_nsobject<NSAutoreleasePool> { + private: + explicit scoped_nsobject(NSAutoreleasePool* object = nil); + DISALLOW_COPY_AND_ASSIGN(scoped_nsobject); +}; + +#endif // BASE_MEMORY_SCOPED_NSOBJECT_H_ diff --git a/base/memory/scoped_open_process.h b/base/memory/scoped_open_process.h new file mode 100644 index 0000000..d5bdd95 --- /dev/null +++ b/base/memory/scoped_open_process.h @@ -0,0 +1,50 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_SCOPED_OPEN_PROCESS_H_ +#define BASE_MEMORY_SCOPED_OPEN_PROCESS_H_ +#pragma once + +#include "base/process.h" +#include "base/process_util.h" + +namespace base { + +// A class that opens a process from its process id and closes it when the +// instance goes out of scope. +class ScopedOpenProcess { + public: + ScopedOpenProcess() : handle_(kNullProcessHandle) { + } + + // Automatically close the process. + ~ScopedOpenProcess() { + Close(); + } + + // Open a new process by pid. Closes any previously opened process (even if + // opening the new one fails). + bool Open(ProcessId pid) { + Close(); + return OpenProcessHandle(pid, &handle_); + } + + // Close the previously opened process. + void Close() { + if (handle_ == kNullProcessHandle) + return; + + CloseProcessHandle(handle_); + handle_ = kNullProcessHandle; + } + + ProcessHandle handle() const { return handle_; } + + private: + ProcessHandle handle_; + DISALLOW_COPY_AND_ASSIGN(ScopedOpenProcess); +}; +} // namespace base + +#endif // BASE_MEMORY_SCOPED_OPEN_PROCESS_H_ diff --git a/base/memory/scoped_ptr.h b/base/memory/scoped_ptr.h new file mode 100644 index 0000000..1067d42 --- /dev/null +++ b/base/memory/scoped_ptr.h @@ -0,0 +1,383 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Scopers help you manage ownership of a pointer, helping you easily manage the +// a pointer within a scope, and automatically destroying the pointer at the +// end of a scope. There are two main classes you will use, which correspond +// to the operators new/delete and new[]/delete[]. +// +// Example usage (scoped_ptr): +// { +// scoped_ptr<Foo> foo(new Foo("wee")); +// } // foo goes out of scope, releasing the pointer with it. +// +// { +// scoped_ptr<Foo> foo; // No pointer managed. +// foo.reset(new Foo("wee")); // Now a pointer is managed. +// foo.reset(new Foo("wee2")); // Foo("wee") was destroyed. +// foo.reset(new Foo("wee3")); // Foo("wee2") was destroyed. +// foo->Method(); // Foo::Method() called. +// foo.get()->Method(); // Foo::Method() called. +// SomeFunc(foo.release()); // SomeFunc takes ownership, foo no longer +// // manages a pointer. +// foo.reset(new Foo("wee4")); // foo manages a pointer again. +// foo.reset(); // Foo("wee4") destroyed, foo no longer +// // manages a pointer. +// } // foo wasn't managing a pointer, so nothing was destroyed. +// +// Example usage (scoped_array): +// { +// scoped_array<Foo> foo(new Foo[100]); +// foo.get()->Method(); // Foo::Method on the 0th element. +// foo[10].Method(); // Foo::Method on the 10th element. 
+// } + +#ifndef BASE_MEMORY_SCOPED_PTR_H_ +#define BASE_MEMORY_SCOPED_PTR_H_ +#pragma once + +// This is an implementation designed to match the anticipated future TR2 +// implementation of the scoped_ptr class, and its closely-related brethren, +// scoped_array, scoped_ptr_malloc. + +#include <assert.h> +#include <stddef.h> +#include <stdlib.h> + +#include "base/compiler_specific.h" + +// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T> +// automatically deletes the pointer it holds (if any). +// That is, scoped_ptr<T> owns the T object that it points to. +// Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object. +// Also like T*, scoped_ptr<T> is thread-compatible, and once you +// dereference it, you get the threadsafety guarantees of T. +// +// The size of a scoped_ptr is small: +// sizeof(scoped_ptr<C>) == sizeof(C*) +template <class C> +class scoped_ptr { + public: + + // The element type + typedef C element_type; + + // Constructor. Defaults to initializing with NULL. + // There is no way to create an uninitialized scoped_ptr. + // The input parameter must be allocated with new. + explicit scoped_ptr(C* p = NULL) : ptr_(p) { } + + // Destructor. If there is a C object, delete it. + // We don't need to test ptr_ == NULL because C++ does that for us. + ~scoped_ptr() { + enum { type_must_be_complete = sizeof(C) }; + delete ptr_; + } + + // Reset. Deletes the current owned object, if any. + // Then takes ownership of a new object, if given. + // this->reset(this->get()) works. + void reset(C* p = NULL) { + if (p != ptr_) { + enum { type_must_be_complete = sizeof(C) }; + delete ptr_; + ptr_ = p; + } + } + + // Accessors to get the owned object. + // operator* and operator-> will assert() if there is no current object. + C& operator*() const { + assert(ptr_ != NULL); + return *ptr_; + } + C* operator->() const { + assert(ptr_ != NULL); + return ptr_; + } + C* get() const { return ptr_; } + + // Comparison operators. + // These return whether two scoped_ptr refer to the same object, not just to + // two different but equal objects. + bool operator==(C* p) const { return ptr_ == p; } + bool operator!=(C* p) const { return ptr_ != p; } + + // Swap two scoped pointers. + void swap(scoped_ptr& p2) { + C* tmp = ptr_; + ptr_ = p2.ptr_; + p2.ptr_ = tmp; + } + + // Release a pointer. + // The return value is the current pointer held by this object. + // If this object holds a NULL pointer, the return value is NULL. + // After this operation, this object will hold a NULL pointer, + // and will not own the object any more. + C* release() WARN_UNUSED_RESULT { + C* retVal = ptr_; + ptr_ = NULL; + return retVal; + } + + private: + C* ptr_; + + // Forbid comparison of scoped_ptr types. If C2 != C, it totally doesn't + // make sense, and if C2 == C, it still doesn't make sense because you should + // never have the same object owned by two different scoped_ptrs. 
+ template <class C2> bool operator==(scoped_ptr<C2> const& p2) const; + template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const; + + // Disallow evil constructors + scoped_ptr(const scoped_ptr&); + void operator=(const scoped_ptr&); +}; + +// Free functions +template <class C> +void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) { + p1.swap(p2); +} + +template <class C> +bool operator==(C* p1, const scoped_ptr<C>& p2) { + return p1 == p2.get(); +} + +template <class C> +bool operator!=(C* p1, const scoped_ptr<C>& p2) { + return p1 != p2.get(); +} + +// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate +// with new [] and the destructor deletes objects with delete []. +// +// As with scoped_ptr<C>, a scoped_array<C> either points to an object +// or is NULL. A scoped_array<C> owns the object that it points to. +// scoped_array<T> is thread-compatible, and once you index into it, +// the returned objects have only the threadsafety guarantees of T. +// +// Size: sizeof(scoped_array<C>) == sizeof(C*) +template <class C> +class scoped_array { + public: + + // The element type + typedef C element_type; + + // Constructor. Defaults to intializing with NULL. + // There is no way to create an uninitialized scoped_array. + // The input parameter must be allocated with new []. + explicit scoped_array(C* p = NULL) : array_(p) { } + + // Destructor. If there is a C object, delete it. + // We don't need to test ptr_ == NULL because C++ does that for us. + ~scoped_array() { + enum { type_must_be_complete = sizeof(C) }; + delete[] array_; + } + + // Reset. Deletes the current owned object, if any. + // Then takes ownership of a new object, if given. + // this->reset(this->get()) works. + void reset(C* p = NULL) { + if (p != array_) { + enum { type_must_be_complete = sizeof(C) }; + delete[] array_; + array_ = p; + } + } + + // Get one element of the current object. + // Will assert() if there is no current object, or index i is negative. + C& operator[](ptrdiff_t i) const { + assert(i >= 0); + assert(array_ != NULL); + return array_[i]; + } + + // Get a pointer to the zeroth element of the current object. + // If there is no current object, return NULL. + C* get() const { + return array_; + } + + // Comparison operators. + // These return whether two scoped_array refer to the same object, not just to + // two different but equal objects. + bool operator==(C* p) const { return array_ == p; } + bool operator!=(C* p) const { return array_ != p; } + + // Swap two scoped arrays. + void swap(scoped_array& p2) { + C* tmp = array_; + array_ = p2.array_; + p2.array_ = tmp; + } + + // Release an array. + // The return value is the current pointer held by this object. + // If this object holds a NULL pointer, the return value is NULL. + // After this operation, this object will hold a NULL pointer, + // and will not own the object any more. + C* release() WARN_UNUSED_RESULT { + C* retVal = array_; + array_ = NULL; + return retVal; + } + + private: + C* array_; + + // Forbid comparison of different scoped_array types. 
+ template <class C2> bool operator==(scoped_array<C2> const& p2) const; + template <class C2> bool operator!=(scoped_array<C2> const& p2) const; + + // Disallow evil constructors + scoped_array(const scoped_array&); + void operator=(const scoped_array&); +}; + +// Free functions +template <class C> +void swap(scoped_array<C>& p1, scoped_array<C>& p2) { + p1.swap(p2); +} + +template <class C> +bool operator==(C* p1, const scoped_array<C>& p2) { + return p1 == p2.get(); +} + +template <class C> +bool operator!=(C* p1, const scoped_array<C>& p2) { + return p1 != p2.get(); +} + +// This class wraps the c library function free() in a class that can be +// passed as a template argument to scoped_ptr_malloc below. +class ScopedPtrMallocFree { + public: + inline void operator()(void* x) const { + free(x); + } +}; + +// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a +// second template argument, the functor used to free the object. + +template<class C, class FreeProc = ScopedPtrMallocFree> +class scoped_ptr_malloc { + public: + + // The element type + typedef C element_type; + + // Constructor. Defaults to initializing with NULL. + // There is no way to create an uninitialized scoped_ptr. + // The input parameter must be allocated with an allocator that matches the + // Free functor. For the default Free functor, this is malloc, calloc, or + // realloc. + explicit scoped_ptr_malloc(C* p = NULL): ptr_(p) {} + + // Destructor. If there is a C object, call the Free functor. + ~scoped_ptr_malloc() { + free_(ptr_); + } + + // Reset. Calls the Free functor on the current owned object, if any. + // Then takes ownership of a new object, if given. + // this->reset(this->get()) works. + void reset(C* p = NULL) { + if (ptr_ != p) { + free_(ptr_); + ptr_ = p; + } + } + + // Get the current object. + // operator* and operator-> will cause an assert() failure if there is + // no current object. + C& operator*() const { + assert(ptr_ != NULL); + return *ptr_; + } + + C* operator->() const { + assert(ptr_ != NULL); + return ptr_; + } + + C* get() const { + return ptr_; + } + + // Comparison operators. + // These return whether a scoped_ptr_malloc and a plain pointer refer + // to the same object, not just to two different but equal objects. + // For compatibility with the boost-derived implementation, these + // take non-const arguments. + bool operator==(C* p) const { + return ptr_ == p; + } + + bool operator!=(C* p) const { + return ptr_ != p; + } + + // Swap two scoped pointers. + void swap(scoped_ptr_malloc & b) { + C* tmp = b.ptr_; + b.ptr_ = ptr_; + ptr_ = tmp; + } + + // Release a pointer. + // The return value is the current pointer held by this object. + // If this object holds a NULL pointer, the return value is NULL. + // After this operation, this object will hold a NULL pointer, + // and will not own the object any more. 
+ C* release() WARN_UNUSED_RESULT { + C* tmp = ptr_; + ptr_ = NULL; + return tmp; + } + + private: + C* ptr_; + + // no reason to use these: each scoped_ptr_malloc should have its own object + template <class C2, class GP> + bool operator==(scoped_ptr_malloc<C2, GP> const& p) const; + template <class C2, class GP> + bool operator!=(scoped_ptr_malloc<C2, GP> const& p) const; + + static FreeProc const free_; + + // Disallow evil constructors + scoped_ptr_malloc(const scoped_ptr_malloc&); + void operator=(const scoped_ptr_malloc&); +}; + +template<class C, class FP> +FP const scoped_ptr_malloc<C, FP>::free_ = FP(); + +template<class C, class FP> inline +void swap(scoped_ptr_malloc<C, FP>& a, scoped_ptr_malloc<C, FP>& b) { + a.swap(b); +} + +template<class C, class FP> inline +bool operator==(C* p, const scoped_ptr_malloc<C, FP>& b) { + return p == b.get(); +} + +template<class C, class FP> inline +bool operator!=(C* p, const scoped_ptr_malloc<C, FP>& b) { + return p != b.get(); +} + +#endif // BASE_MEMORY_SCOPED_PTR_H_ diff --git a/base/memory/scoped_ptr_unittest.cc b/base/memory/scoped_ptr_unittest.cc new file mode 100644 index 0000000..7519051 --- /dev/null +++ b/base/memory/scoped_ptr_unittest.cc @@ -0,0 +1,169 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/basictypes.h" +#include "base/memory/scoped_ptr.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +class ConDecLogger { + public: + ConDecLogger() : ptr_(NULL) { } + explicit ConDecLogger(int* ptr) { set_ptr(ptr); } + ~ConDecLogger() { --*ptr_; } + + void set_ptr(int* ptr) { ptr_ = ptr; ++*ptr_; } + + int SomeMeth(int x) { return x; } + + private: + int* ptr_; + DISALLOW_COPY_AND_ASSIGN(ConDecLogger); +}; + +} // namespace + +TEST(ScopedPtrTest, ScopedPtr) { + int constructed = 0; + + { + scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed)); + EXPECT_EQ(1, constructed); + EXPECT_TRUE(scoper.get()); + + EXPECT_EQ(10, scoper->SomeMeth(10)); + EXPECT_EQ(10, scoper.get()->SomeMeth(10)); + EXPECT_EQ(10, (*scoper).SomeMeth(10)); + } + EXPECT_EQ(0, constructed); + + // Test reset() and release() + { + scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed)); + EXPECT_EQ(1, constructed); + EXPECT_TRUE(scoper.get()); + + scoper.reset(new ConDecLogger(&constructed)); + EXPECT_EQ(1, constructed); + EXPECT_TRUE(scoper.get()); + + scoper.reset(); + EXPECT_EQ(0, constructed); + EXPECT_FALSE(scoper.get()); + + scoper.reset(new ConDecLogger(&constructed)); + EXPECT_EQ(1, constructed); + EXPECT_TRUE(scoper.get()); + + ConDecLogger* take = scoper.release(); + EXPECT_EQ(1, constructed); + EXPECT_FALSE(scoper.get()); + delete take; + EXPECT_EQ(0, constructed); + + scoper.reset(new ConDecLogger(&constructed)); + EXPECT_EQ(1, constructed); + EXPECT_TRUE(scoper.get()); + } + EXPECT_EQ(0, constructed); + + // Test swap(), == and != + { + scoped_ptr<ConDecLogger> scoper1; + scoped_ptr<ConDecLogger> scoper2; + EXPECT_TRUE(scoper1 == scoper2.get()); + EXPECT_FALSE(scoper1 != scoper2.get()); + + ConDecLogger* logger = new ConDecLogger(&constructed); + scoper1.reset(logger); + EXPECT_EQ(logger, scoper1.get()); + EXPECT_FALSE(scoper2.get()); + EXPECT_FALSE(scoper1 == scoper2.get()); + EXPECT_TRUE(scoper1 != scoper2.get()); + + scoper2.swap(scoper1); + EXPECT_EQ(logger, scoper2.get()); + EXPECT_FALSE(scoper1.get()); + EXPECT_FALSE(scoper1 == scoper2.get()); + EXPECT_TRUE(scoper1 != 
scoper2.get()); + } + EXPECT_EQ(0, constructed); +} + +TEST(ScopedPtrTest, ScopedArray) { + static const int kNumLoggers = 12; + + int constructed = 0; + + { + scoped_array<ConDecLogger> scoper(new ConDecLogger[kNumLoggers]); + EXPECT_TRUE(scoper.get()); + EXPECT_EQ(&scoper[0], scoper.get()); + for (int i = 0; i < kNumLoggers; ++i) { + scoper[i].set_ptr(&constructed); + } + EXPECT_EQ(12, constructed); + + EXPECT_EQ(10, scoper.get()->SomeMeth(10)); + EXPECT_EQ(10, scoper[2].SomeMeth(10)); + } + EXPECT_EQ(0, constructed); + + // Test reset() and release() + { + scoped_array<ConDecLogger> scoper; + EXPECT_FALSE(scoper.get()); + EXPECT_FALSE(scoper.release()); + EXPECT_FALSE(scoper.get()); + scoper.reset(); + EXPECT_FALSE(scoper.get()); + + scoper.reset(new ConDecLogger[kNumLoggers]); + for (int i = 0; i < kNumLoggers; ++i) { + scoper[i].set_ptr(&constructed); + } + EXPECT_EQ(12, constructed); + scoper.reset(); + EXPECT_EQ(0, constructed); + + scoper.reset(new ConDecLogger[kNumLoggers]); + for (int i = 0; i < kNumLoggers; ++i) { + scoper[i].set_ptr(&constructed); + } + EXPECT_EQ(12, constructed); + ConDecLogger* ptr = scoper.release(); + EXPECT_EQ(12, constructed); + delete[] ptr; + EXPECT_EQ(0, constructed); + } + EXPECT_EQ(0, constructed); + + // Test swap(), == and != + { + scoped_array<ConDecLogger> scoper1; + scoped_array<ConDecLogger> scoper2; + EXPECT_TRUE(scoper1 == scoper2.get()); + EXPECT_FALSE(scoper1 != scoper2.get()); + + ConDecLogger* loggers = new ConDecLogger[kNumLoggers]; + for (int i = 0; i < kNumLoggers; ++i) { + loggers[i].set_ptr(&constructed); + } + scoper1.reset(loggers); + EXPECT_EQ(loggers, scoper1.get()); + EXPECT_FALSE(scoper2.get()); + EXPECT_FALSE(scoper1 == scoper2.get()); + EXPECT_TRUE(scoper1 != scoper2.get()); + + scoper2.swap(scoper1); + EXPECT_EQ(loggers, scoper2.get()); + EXPECT_FALSE(scoper1.get()); + EXPECT_FALSE(scoper1 == scoper2.get()); + EXPECT_TRUE(scoper1 != scoper2.get()); + } + EXPECT_EQ(0, constructed); +} + +// TODO scoped_ptr_malloc diff --git a/base/memory/scoped_temp_dir.cc b/base/memory/scoped_temp_dir.cc new file mode 100644 index 0000000..f7db15d --- /dev/null +++ b/base/memory/scoped_temp_dir.cc @@ -0,0 +1,84 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/scoped_temp_dir.h" + +#include "base/file_util.h" +#include "base/logging.h" + +ScopedTempDir::ScopedTempDir() { +} + +ScopedTempDir::~ScopedTempDir() { + if (!path_.empty() && !Delete()) + LOG(WARNING) << "Could not delete temp dir in dtor."; +} + +bool ScopedTempDir::CreateUniqueTempDir() { + if (!path_.empty()) + return false; + + // This "scoped_dir" prefix is only used on Windows and serves as a template + // for the unique name. + if (!file_util::CreateNewTempDirectory(FILE_PATH_LITERAL("scoped_dir"), + &path_)) + return false; + + return true; +} + +bool ScopedTempDir::CreateUniqueTempDirUnderPath(const FilePath& base_path) { + if (!path_.empty()) + return false; + + // If |base_path| does not exist, create it. + if (!file_util::CreateDirectory(base_path)) + return false; + + // Create a new, uniquely named directory under |base_path|. 
+ if (!file_util::CreateTemporaryDirInDir( + base_path, + FILE_PATH_LITERAL("scoped_dir_"), + &path_)) + return false; + + return true; +} + +bool ScopedTempDir::Set(const FilePath& path) { + if (!path_.empty()) + return false; + + if (!file_util::DirectoryExists(path) && + !file_util::CreateDirectory(path)) + return false; + + path_ = path; + return true; +} + +bool ScopedTempDir::Delete() { + if (path_.empty()) + return false; + + bool ret = file_util::Delete(path_, true); + if (ret) { + // We only clear the path if deleted the directory. + path_.clear(); + } else { + LOG(ERROR) << "ScopedTempDir unable to delete " << path_.value(); + } + + return ret; +} + +FilePath ScopedTempDir::Take() { + FilePath ret = path_; + path_ = FilePath(); + return ret; +} + +bool ScopedTempDir::IsValid() const { + return !path_.empty() && file_util::DirectoryExists(path_); +} diff --git a/base/memory/scoped_temp_dir.h b/base/memory/scoped_temp_dir.h new file mode 100644 index 0000000..4c0a73f --- /dev/null +++ b/base/memory/scoped_temp_dir.h @@ -0,0 +1,59 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_SCOPED_TEMP_DIR_H_ +#define BASE_MEMORY_SCOPED_TEMP_DIR_H_ +#pragma once + +// An object representing a temporary / scratch directory that should be cleaned +// up (recursively) when this object goes out of scope. Note that since +// deletion occurs during the destructor, no further error handling is possible +// if the directory fails to be deleted. As a result, deletion is not +// guaranteed by this class. +// +// Multiple calls to the methods which establish a temporary directory +// (CreateUniqueTempDir, CreateUniqueTempDirUnderPath, and Set) must have +// intervening calls to Delete or Take, or the calls will fail. + +#include "base/base_api.h" +#include "base/file_path.h" + +class BASE_API ScopedTempDir { + public: + // No directory is owned/created initially. + ScopedTempDir(); + + // Recursively delete path. + ~ScopedTempDir(); + + // Creates a unique directory in TempPath, and takes ownership of it. + // See file_util::CreateNewTemporaryDirectory. + bool CreateUniqueTempDir() WARN_UNUSED_RESULT; + + // Creates a unique directory under a given path, and takes ownership of it. + bool CreateUniqueTempDirUnderPath(const FilePath& path) WARN_UNUSED_RESULT; + + // Takes ownership of directory at |path|, creating it if necessary. + // Don't call multiple times unless Take() has been called first. + bool Set(const FilePath& path) WARN_UNUSED_RESULT; + + // Deletes the temporary directory wrapped by this object. + bool Delete() WARN_UNUSED_RESULT; + + // Caller takes ownership of the temporary directory so it won't be destroyed + // when this object goes out of scope. + FilePath Take(); + + const FilePath& path() const { return path_; } + + // Returns true if path_ is non-empty and exists. + bool IsValid() const; + + private: + FilePath path_; + + DISALLOW_COPY_AND_ASSIGN(ScopedTempDir); +}; + +#endif // BASE_MEMORY_SCOPED_TEMP_DIR_H_ diff --git a/base/memory/scoped_temp_dir_unittest.cc b/base/memory/scoped_temp_dir_unittest.cc new file mode 100644 index 0000000..a83856f --- /dev/null +++ b/base/memory/scoped_temp_dir_unittest.cc @@ -0,0 +1,110 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
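A minimal usage sketch of ScopedTempDir, for orientation before the tests that follow (illustrative only, not part of the diff above; FilePath::AppendASCII and file_util::WriteFile are assumed to be available from base as usual):

  ScopedTempDir temp_dir;
  if (!temp_dir.CreateUniqueTempDir())
    return;  // Could not create a scratch directory.
  FilePath file = temp_dir.path().AppendASCII("data.txt");
  file_util::WriteFile(file, "hello", 5);
  // On scope exit, ~ScopedTempDir() recursively deletes the directory,
  // unless ownership was taken back with Take().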
+ +#include "base/file_util.h" +#include "base/memory/scoped_temp_dir.h" +#include "base/platform_file.h" +#include "testing/gtest/include/gtest/gtest.h" + +TEST(ScopedTempDir, FullPath) { + FilePath test_path; + file_util::CreateNewTempDirectory(FILE_PATH_LITERAL("scoped_temp_dir"), + &test_path); + + // Against an existing dir, it should get destroyed when leaving scope. + EXPECT_TRUE(file_util::DirectoryExists(test_path)); + { + ScopedTempDir dir; + EXPECT_TRUE(dir.Set(test_path)); + EXPECT_TRUE(dir.IsValid()); + } + EXPECT_FALSE(file_util::DirectoryExists(test_path)); + + { + ScopedTempDir dir; + EXPECT_TRUE(dir.Set(test_path)); + // Now the dir doesn't exist, so ensure that it gets created. + EXPECT_TRUE(file_util::DirectoryExists(test_path)); + // When we call Release(), it shouldn't get destroyed when leaving scope. + FilePath path = dir.Take(); + EXPECT_EQ(path.value(), test_path.value()); + EXPECT_FALSE(dir.IsValid()); + } + EXPECT_TRUE(file_util::DirectoryExists(test_path)); + + // Clean up. + { + ScopedTempDir dir; + EXPECT_TRUE(dir.Set(test_path)); + } + EXPECT_FALSE(file_util::DirectoryExists(test_path)); +} + +TEST(ScopedTempDir, TempDir) { + // In this case, just verify that a directory was created and that it's a + // child of TempDir. + FilePath test_path; + { + ScopedTempDir dir; + EXPECT_TRUE(dir.CreateUniqueTempDir()); + test_path = dir.path(); + EXPECT_TRUE(file_util::DirectoryExists(test_path)); + FilePath tmp_dir; + EXPECT_TRUE(file_util::GetTempDir(&tmp_dir)); + EXPECT_TRUE(test_path.value().find(tmp_dir.value()) != std::string::npos); + } + EXPECT_FALSE(file_util::DirectoryExists(test_path)); +} + +TEST(ScopedTempDir, UniqueTempDirUnderPath) { + // Create a path which will contain a unique temp path. + FilePath base_path; + file_util::CreateNewTempDirectory(FILE_PATH_LITERAL("base_dir"), + &base_path); + + FilePath test_path; + { + ScopedTempDir dir; + EXPECT_TRUE(dir.CreateUniqueTempDirUnderPath(base_path)); + test_path = dir.path(); + EXPECT_TRUE(file_util::DirectoryExists(test_path)); + EXPECT_TRUE(base_path.IsParent(test_path)); + EXPECT_TRUE(test_path.value().find(base_path.value()) != std::string::npos); + } + EXPECT_FALSE(file_util::DirectoryExists(test_path)); +} + +TEST(ScopedTempDir, MultipleInvocations) { + ScopedTempDir dir; + EXPECT_TRUE(dir.CreateUniqueTempDir()); + EXPECT_FALSE(dir.CreateUniqueTempDir()); + EXPECT_TRUE(dir.Delete()); + EXPECT_TRUE(dir.CreateUniqueTempDir()); + EXPECT_FALSE(dir.CreateUniqueTempDir()); + ScopedTempDir other_dir; + EXPECT_TRUE(other_dir.Set(dir.Take())); + EXPECT_TRUE(dir.CreateUniqueTempDir()); + EXPECT_FALSE(dir.CreateUniqueTempDir()); + EXPECT_FALSE(other_dir.CreateUniqueTempDir()); +} + +#if defined(OS_WIN) +TEST(ScopedTempDir, LockedTempDir) { + ScopedTempDir dir; + EXPECT_TRUE(dir.CreateUniqueTempDir()); + int file_flags = base::PLATFORM_FILE_CREATE_ALWAYS | + base::PLATFORM_FILE_WRITE; + base::PlatformFileError error_code = base::PLATFORM_FILE_OK; + FilePath file_path(dir.path().Append(FILE_PATH_LITERAL("temp"))); + base::PlatformFile file = base::CreatePlatformFile(file_path, file_flags, + NULL, &error_code); + EXPECT_NE(base::kInvalidPlatformFileValue, file); + EXPECT_EQ(base::PLATFORM_FILE_OK, error_code); + EXPECT_FALSE(dir.Delete()); // We should not be able to delete. + EXPECT_FALSE(dir.path().empty()); // We should still have a valid path. + EXPECT_TRUE(base::ClosePlatformFile(file)); + // Now, we should be able to delete. 
+ EXPECT_TRUE(dir.Delete()); +} +#endif // defined(OS_WIN) diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h new file mode 100644 index 0000000..aec4375 --- /dev/null +++ b/base/memory/scoped_vector.h @@ -0,0 +1,90 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_SCOPED_VECTOR_H_ +#define BASE_MEMORY_SCOPED_VECTOR_H_ +#pragma once + +#include <vector> + +#include "base/basictypes.h" +#include "base/stl_util-inl.h" + +// ScopedVector wraps a vector deleting the elements from its +// destructor. +template <class T> +class ScopedVector { + public: + typedef typename std::vector<T*>::iterator iterator; + typedef typename std::vector<T*>::const_iterator const_iterator; + typedef typename std::vector<T*>::reverse_iterator reverse_iterator; + typedef typename std::vector<T*>::const_reverse_iterator + const_reverse_iterator; + + ScopedVector() {} + ~ScopedVector() { reset(); } + + std::vector<T*>* operator->() { return &v; } + const std::vector<T*>* operator->() const { return &v; } + T*& operator[](size_t i) { return v[i]; } + const T* operator[](size_t i) const { return v[i]; } + + bool empty() const { return v.empty(); } + size_t size() const { return v.size(); } + + reverse_iterator rbegin() { return v.rbegin(); } + const_reverse_iterator rbegin() const { return v.rbegin(); } + reverse_iterator rend() { return v.rend(); } + const_reverse_iterator rend() const { return v.rend(); } + + iterator begin() { return v.begin(); } + const_iterator begin() const { return v.begin(); } + iterator end() { return v.end(); } + const_iterator end() const { return v.end(); } + + void push_back(T* elem) { v.push_back(elem); } + + std::vector<T*>& get() { return v; } + const std::vector<T*>& get() const { return v; } + void swap(ScopedVector<T>& other) { v.swap(other.v); } + void release(std::vector<T*>* out) { + out->swap(v); + v.clear(); + } + + void reset() { STLDeleteElements(&v); } + void reserve(size_t capacity) { v.reserve(capacity); } + void resize(size_t new_size) { v.resize(new_size); } + + // Lets the ScopedVector take ownership of |x|. + iterator insert(iterator position, T* x) { + return v.insert(position, x); + } + + iterator erase(iterator position) { + delete *position; + return v.erase(position); + } + + iterator erase(iterator first, iterator last) { + STLDeleteContainerPointers(first, last); + return v.erase(first, last); + } + + // Like |erase()|, but doesn't delete the element at |position|. + iterator weak_erase(iterator position) { + return v.erase(position); + } + + // Like |erase()|, but doesn't delete the elements in [first, last). + iterator weak_erase(iterator first, iterator last) { + return v.erase(first, last); + } + private: + std::vector<T*> v; + + DISALLOW_COPY_AND_ASSIGN(ScopedVector); +}; + +#endif // BASE_MEMORY_SCOPED_VECTOR_H_ diff --git a/base/memory/singleton.h b/base/memory/singleton.h new file mode 100644 index 0000000..a387356 --- /dev/null +++ b/base/memory/singleton.h @@ -0,0 +1,271 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
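An illustrative sketch of typical ScopedVector use (Foo is a hypothetical element type; this snippet is not part of the diff):

  struct Foo { /* ... */ };

  ScopedVector<Foo> foos;
  foos.push_back(new Foo);        // ScopedVector takes ownership.
  foos.push_back(new Foo);
  foos.erase(foos.begin());       // Deletes the element, then removes it.
  Foo* survivor = foos[0];
  foos.weak_erase(foos.begin());  // Removes without deleting; caller owns it now.
  delete survivor;
  // Anything still held is deleted by reset() or ~ScopedVector().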
+ +#ifndef BASE_MEMORY_SINGLETON_H_ +#define BASE_MEMORY_SINGLETON_H_ +#pragma once + +#include "base/at_exit.h" +#include "base/atomicops.h" +#include "base/third_party/dynamic_annotations/dynamic_annotations.h" +#include "base/threading/platform_thread.h" +#include "base/threading/thread_restrictions.h" + +// Default traits for Singleton<Type>. Calls operator new and operator delete on +// the object. Registers automatic deletion at process exit. +// Overload if you need arguments or another memory allocation function. +template<typename Type> +struct DefaultSingletonTraits { + // Allocates the object. + static Type* New() { + // The parenthesis is very important here; it forces POD type + // initialization. + return new Type(); + } + + // Destroys the object. + static void Delete(Type* x) { + delete x; + } + + // Set to true to automatically register deletion of the object on process + // exit. See below for the required call that makes this happen. + static const bool kRegisterAtExit = true; + + // Set to false to disallow access on a non-joinable thread. This is + // different from kRegisterAtExit because StaticMemorySingletonTraits allows + // access on non-joinable threads, and gracefully handles this. + static const bool kAllowedToAccessOnNonjoinableThread = false; +}; + + +// Alternate traits for use with the Singleton<Type>. Identical to +// DefaultSingletonTraits except that the Singleton will not be cleaned up +// at exit. +template<typename Type> +struct LeakySingletonTraits : public DefaultSingletonTraits<Type> { + static const bool kRegisterAtExit = false; + static const bool kAllowedToAccessOnNonjoinableThread = true; +}; + + +// Alternate traits for use with the Singleton<Type>. Allocates memory +// for the singleton instance from a static buffer. The singleton will +// be cleaned up at exit, but can't be revived after destruction unless +// the Resurrect() method is called. +// +// This is useful for a certain category of things, notably logging and +// tracing, where the singleton instance is of a type carefully constructed to +// be safe to access post-destruction. +// In logging and tracing you'll typically get stray calls at odd times, like +// during static destruction, thread teardown and the like, and there's a +// termination race on the heap-based singleton - e.g. if one thread calls +// get(), but then another thread initiates AtExit processing, the first thread +// may call into an object residing in unallocated memory. If the instance is +// allocated from the data segment, then this is survivable. +// +// The destructor is to deallocate system resources, in this case to unregister +// a callback the system will invoke when logging levels change. Note that +// this is also used in e.g. Chrome Frame, where you have to allow for the +// possibility of loading briefly into someone else's process space, and +// so leaking is not an option, as that would sabotage the state of your host +// process once you've unloaded. +template <typename Type> +struct StaticMemorySingletonTraits { + // WARNING: User has to deal with get() in the singleton class + // this is traits for returning NULL. + static Type* New() { + if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1)) + return NULL; + Type* ptr = reinterpret_cast<Type*>(buffer_); + + // We are protected by a memory barrier. 
+ new(ptr) Type(); + return ptr; + } + + static void Delete(Type* p) { + base::subtle::NoBarrier_Store(&dead_, 1); + base::subtle::MemoryBarrier(); + if (p != NULL) + p->Type::~Type(); + } + + static const bool kRegisterAtExit = true; + static const bool kAllowedToAccessOnNonjoinableThread = true; + + // Exposed for unittesting. + static void Resurrect() { + base::subtle::NoBarrier_Store(&dead_, 0); + } + + private: + static const size_t kBufferSize = (sizeof(Type) + + sizeof(intptr_t) - 1) / sizeof(intptr_t); + static intptr_t buffer_[kBufferSize]; + + // Signal the object was already deleted, so it is not revived. + static base::subtle::Atomic32 dead_; +}; + +template <typename Type> intptr_t + StaticMemorySingletonTraits<Type>::buffer_[kBufferSize]; +template <typename Type> base::subtle::Atomic32 + StaticMemorySingletonTraits<Type>::dead_ = 0; + +// The Singleton<Type, Traits, DifferentiatingType> class manages a single +// instance of Type which will be created on first use and will be destroyed at +// normal process exit). The Trait::Delete function will not be called on +// abnormal process exit. +// +// DifferentiatingType is used as a key to differentiate two different +// singletons having the same memory allocation functions but serving a +// different purpose. This is mainly used for Locks serving different purposes. +// +// Example usage: +// +// In your header: +// #include "base/memory/singleton.h" +// class FooClass { +// public: +// static FooClass* GetInstance(); <-- See comment below on this. +// void Bar() { ... } +// private: +// FooClass() { ... } +// friend struct DefaultSingletonTraits<FooClass>; +// +// DISALLOW_COPY_AND_ASSIGN(FooClass); +// }; +// +// In your source file: +// FooClass* FooClass::GetInstance() { +// return Singleton<FooClass>::get(); +// } +// +// And to call methods on FooClass: +// FooClass::GetInstance()->Bar(); +// +// NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance +// and it is important that FooClass::GetInstance() is not inlined in the +// header. This makes sure that when source files from multiple targets include +// this header they don't end up with different copies of the inlined code +// creating multiple copies of the singleton. +// +// Singleton<> has no non-static members and doesn't need to actually be +// instantiated. +// +// This class is itself thread-safe. The underlying Type must of course be +// thread-safe if you want to use it concurrently. Two parameters may be tuned +// depending on the user's requirements. +// +// Glossary: +// RAE = kRegisterAtExit +// +// On every platform, if Traits::RAE is true, the singleton will be destroyed at +// process exit. More precisely it uses base::AtExitManager which requires an +// object of this type to be instantiated. AtExitManager mimics the semantics +// of atexit() such as LIFO order but under Windows is safer to call. For more +// information see at_exit.h. +// +// If Traits::RAE is false, the singleton will not be freed at process exit, +// thus the singleton will be leaked if it is ever accessed. Traits::RAE +// shouldn't be false unless absolutely necessary. Remember that the heap where +// the object is allocated may be destroyed by the CRT anyway. +// +// Caveats: +// (a) Every call to get(), operator->() and operator*() incurs some overhead +// (16ns on my P4/2.8GHz) to check whether the object has already been +// initialized. You may wish to cache the result of get(); it will not +// change. 
+// +// (b) Your factory function must never throw an exception. This class is not +// exception-safe. +// +template <typename Type, + typename Traits = DefaultSingletonTraits<Type>, + typename DifferentiatingType = Type> +class Singleton { + private: + // Classes using the Singleton<T> pattern should declare a GetInstance() + // method and call Singleton::get() from within that. + friend Type* Type::GetInstance(); + + // This class is safe to be constructed and copy-constructed since it has no + // member. + + // Return a pointer to the one true instance of the class. + static Type* get() { + if (!Traits::kAllowedToAccessOnNonjoinableThread) + base::ThreadRestrictions::AssertSingletonAllowed(); + + // Our AtomicWord doubles as a spinlock, where a value of + // kBeingCreatedMarker means the spinlock is being held for creation. + static const base::subtle::AtomicWord kBeingCreatedMarker = 1; + + base::subtle::AtomicWord value = base::subtle::NoBarrier_Load(&instance_); + if (value != 0 && value != kBeingCreatedMarker) { + // See the corresponding HAPPENS_BEFORE below. + ANNOTATE_HAPPENS_AFTER(&instance_); + return reinterpret_cast<Type*>(value); + } + + // Object isn't created yet, maybe we will get to create it, let's try... + if (base::subtle::Acquire_CompareAndSwap(&instance_, + 0, + kBeingCreatedMarker) == 0) { + // instance_ was NULL and is now kBeingCreatedMarker. Only one thread + // will ever get here. Threads might be spinning on us, and they will + // stop right after we do this store. + Type* newval = Traits::New(); + + // This annotation helps race detectors recognize correct lock-less + // synchronization between different threads calling get(). + // See the corresponding HAPPENS_AFTER below and above. + ANNOTATE_HAPPENS_BEFORE(&instance_); + base::subtle::Release_Store( + &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval)); + + if (newval != NULL && Traits::kRegisterAtExit) + base::AtExitManager::RegisterCallback(OnExit, NULL); + + return newval; + } + + // We hit a race. Another thread beat us and either: + // - Has the object in BeingCreated state + // - Already has the object created... + // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr. + // Unless your constructor can be very time consuming, it is very unlikely + // to hit this race. When it does, we just spin and yield the thread until + // the object has been created. + while (true) { + value = base::subtle::NoBarrier_Load(&instance_); + if (value != kBeingCreatedMarker) + break; + base::PlatformThread::YieldCurrentThread(); + } + + // See the corresponding HAPPENS_BEFORE above. + ANNOTATE_HAPPENS_AFTER(&instance_); + return reinterpret_cast<Type*>(value); + } + + // Adapter function for use with AtExit(). This should be called single + // threaded, so don't use atomic operations. + // Calling OnExit while singleton is in use by other threads is a mistake. + static void OnExit(void* /*unused*/) { + // AtExit should only ever be register after the singleton instance was + // created. We should only ever get here with a valid instance_ pointer. 
+ Traits::Delete( + reinterpret_cast<Type*>(base::subtle::NoBarrier_Load(&instance_))); + instance_ = 0; + } + static base::subtle::AtomicWord instance_; +}; + +template <typename Type, typename Traits, typename DifferentiatingType> +base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>:: + instance_ = 0; + +#endif // BASE_MEMORY_SINGLETON_H_ diff --git a/base/memory/singleton_objc.h b/base/memory/singleton_objc.h new file mode 100644 index 0000000..8531556 --- /dev/null +++ b/base/memory/singleton_objc.h @@ -0,0 +1,61 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Support for using the Singleton<T> pattern with Objective-C objects. A +// SingletonObjC is the same as a Singleton, except the default traits are +// appropriate for Objective-C objects. A typical Objective-C object of type +// NSExampleType can be maintained as a singleton and accessed with: +// +// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get(); +// +// The first time this is used, it will create exampleSingleton as the result +// of [[NSExampleType alloc] init]. Subsequent calls will return the same +// NSExampleType* object. The object will be released by calling +// -[NSExampleType release] when Singleton's atexit routines run +// (see singleton.h). +// +// For Objective-C objects initialized through means other than the +// no-parameter -init selector, DefaultSingletonObjCTraits may be extended +// as needed: +// +// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> { +// static Foo* New() { +// return [[Foo alloc] initWithName:@"selecty"]; +// } +// }; +// ... +// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get(); + +#ifndef BASE_MEMORY_SINGLETON_OBJC_H_ +#define BASE_MEMORY_SINGLETON_OBJC_H_ +#pragma once + +#import <Foundation/Foundation.h> +#include "base/memory/singleton.h" + +// Singleton traits usable to manage traditional Objective-C objects, which +// are instantiated by sending |alloc| and |init| messages, and are deallocated +// in a memory-managed environment when their retain counts drop to 0 by +// sending |release| messages. +template<typename Type> +struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> { + static Type* New() { + return [[Type alloc] init]; + } + + static void Delete(Type* object) { + [object release]; + } +}; + +// Exactly like Singleton, but without the DefaultSingletonObjCTraits as the +// default trait class. This makes it straightforward for Objective-C++ code +// to hold Objective-C objects as singletons. +template<typename Type, + typename Traits = DefaultSingletonObjCTraits<Type>, + typename DifferentiatingType = Type> +class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> { +}; + +#endif // BASE_MEMORY_SINGLETON_OBJC_H_ diff --git a/base/memory/singleton_unittest.cc b/base/memory/singleton_unittest.cc new file mode 100644 index 0000000..a605885 --- /dev/null +++ b/base/memory/singleton_unittest.cc @@ -0,0 +1,256 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
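An illustrative sketch of the GetInstance() pattern described above, combined with LeakySingletonTraits for an object that must stay usable during late shutdown (TraceLog is a hypothetical class; this snippet is not part of the diff):

  // In the header:
  class TraceLog {
   public:
    static TraceLog* GetInstance();
    void Log(const char* msg);
   private:
    TraceLog() {}
    friend struct DefaultSingletonTraits<TraceLog>;
    DISALLOW_COPY_AND_ASSIGN(TraceLog);
  };

  // In the source file -- deliberately not inlined, as explained above:
  TraceLog* TraceLog::GetInstance() {
    return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
  }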
+ +#include "base/at_exit.h" +#include "base/file_util.h" +#include "base/memory/singleton.h" +#include "base/path_service.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +COMPILE_ASSERT(DefaultSingletonTraits<int>::kRegisterAtExit == true, a); + +typedef void (*CallbackFunc)(); + +class IntSingleton { + public: + static IntSingleton* GetInstance() { + return Singleton<IntSingleton>::get(); + } + + int value_; +}; + +class Init5Singleton { + public: + struct Trait; + + static Init5Singleton* GetInstance() { + return Singleton<Init5Singleton, Trait>::get(); + } + + int value_; +}; + +struct Init5Singleton::Trait : public DefaultSingletonTraits<Init5Singleton> { + static Init5Singleton* New() { + Init5Singleton* instance = new Init5Singleton(); + instance->value_ = 5; + return instance; + } +}; + +int* SingletonInt() { + return &IntSingleton::GetInstance()->value_; +} + +int* SingletonInt5() { + return &Init5Singleton::GetInstance()->value_; +} + +template <typename Type> +struct CallbackTrait : public DefaultSingletonTraits<Type> { + static void Delete(Type* instance) { + if (instance->callback_) + (instance->callback_)(); + DefaultSingletonTraits<Type>::Delete(instance); + } +}; + +class CallbackSingleton { + public: + CallbackSingleton() : callback_(NULL) { } + CallbackFunc callback_; +}; + +class CallbackSingletonWithNoLeakTrait : public CallbackSingleton { + public: + struct Trait : public CallbackTrait<CallbackSingletonWithNoLeakTrait> { }; + + CallbackSingletonWithNoLeakTrait() : CallbackSingleton() { } + + static CallbackSingletonWithNoLeakTrait* GetInstance() { + return Singleton<CallbackSingletonWithNoLeakTrait, Trait>::get(); + } +}; + +class CallbackSingletonWithLeakTrait : public CallbackSingleton { + public: + struct Trait : public CallbackTrait<CallbackSingletonWithLeakTrait> { + static const bool kRegisterAtExit = false; + }; + + CallbackSingletonWithLeakTrait() : CallbackSingleton() { } + + static CallbackSingletonWithLeakTrait* GetInstance() { + return Singleton<CallbackSingletonWithLeakTrait, Trait>::get(); + } +}; + +class CallbackSingletonWithStaticTrait : public CallbackSingleton { + public: + struct Trait; + + CallbackSingletonWithStaticTrait() : CallbackSingleton() { } + + static CallbackSingletonWithStaticTrait* GetInstance() { + return Singleton<CallbackSingletonWithStaticTrait, Trait>::get(); + } +}; + +struct CallbackSingletonWithStaticTrait::Trait + : public StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait> { + static void Delete(CallbackSingletonWithStaticTrait* instance) { + if (instance->callback_) + (instance->callback_)(); + StaticMemorySingletonTraits<CallbackSingletonWithStaticTrait>::Delete( + instance); + } +}; + + +void SingletonNoLeak(CallbackFunc CallOnQuit) { + CallbackSingletonWithNoLeakTrait::GetInstance()->callback_ = CallOnQuit; +} + +void SingletonLeak(CallbackFunc CallOnQuit) { + CallbackSingletonWithLeakTrait::GetInstance()->callback_ = CallOnQuit; +} + +CallbackFunc* GetLeakySingleton() { + return &CallbackSingletonWithLeakTrait::GetInstance()->callback_; +} + +void DeleteLeakySingleton() { + DefaultSingletonTraits<CallbackSingletonWithLeakTrait>::Delete( + CallbackSingletonWithLeakTrait::GetInstance()); +} + +void SingletonStatic(CallbackFunc CallOnQuit) { + CallbackSingletonWithStaticTrait::GetInstance()->callback_ = CallOnQuit; +} + +CallbackFunc* GetStaticSingleton() { + return &CallbackSingletonWithStaticTrait::GetInstance()->callback_; +} + +void ResurrectStaticSingleton() { +} + +} // namespace + +class 
SingletonTest : public testing::Test { + public: + SingletonTest() { } + + virtual void SetUp() { + non_leak_called_ = false; + leaky_called_ = false; + static_called_ = false; + } + + protected: + void VerifiesCallbacks() { + EXPECT_TRUE(non_leak_called_); + EXPECT_FALSE(leaky_called_); + EXPECT_TRUE(static_called_); + non_leak_called_ = false; + leaky_called_ = false; + static_called_ = false; + } + + void VerifiesCallbacksNotCalled() { + EXPECT_FALSE(non_leak_called_); + EXPECT_FALSE(leaky_called_); + EXPECT_FALSE(static_called_); + non_leak_called_ = false; + leaky_called_ = false; + static_called_ = false; + } + + static void CallbackNoLeak() { + non_leak_called_ = true; + } + + static void CallbackLeak() { + leaky_called_ = true; + } + + static void CallbackStatic() { + static_called_ = true; + } + + private: + static bool non_leak_called_; + static bool leaky_called_; + static bool static_called_; +}; + +bool SingletonTest::non_leak_called_ = false; +bool SingletonTest::leaky_called_ = false; +bool SingletonTest::static_called_ = false; + +TEST_F(SingletonTest, Basic) { + int* singleton_int; + int* singleton_int_5; + CallbackFunc* leaky_singleton; + CallbackFunc* static_singleton; + + { + base::ShadowingAtExitManager sem; + { + singleton_int = SingletonInt(); + } + // Ensure POD type initialization. + EXPECT_EQ(*singleton_int, 0); + *singleton_int = 1; + + EXPECT_EQ(singleton_int, SingletonInt()); + EXPECT_EQ(*singleton_int, 1); + + { + singleton_int_5 = SingletonInt5(); + } + // Is default initialized to 5. + EXPECT_EQ(*singleton_int_5, 5); + + SingletonNoLeak(&CallbackNoLeak); + SingletonLeak(&CallbackLeak); + SingletonStatic(&CallbackStatic); + static_singleton = GetStaticSingleton(); + leaky_singleton = GetLeakySingleton(); + EXPECT_TRUE(leaky_singleton); + } + + // Verify that only the expected callback has been called. + VerifiesCallbacks(); + // Delete the leaky singleton. It is interesting to note that Purify does + // *not* detect the leak when this call is commented out. :( + DeleteLeakySingleton(); + + // The static singleton can't be acquired post-atexit. + EXPECT_EQ(NULL, GetStaticSingleton()); + + { + base::ShadowingAtExitManager sem; + // Verifiy that the variables were reset. + { + singleton_int = SingletonInt(); + EXPECT_EQ(*singleton_int, 0); + } + { + singleton_int_5 = SingletonInt5(); + EXPECT_EQ(*singleton_int_5, 5); + } + { + // Resurrect the static singleton, and assert that it + // still points to the same (static) memory. + CallbackSingletonWithStaticTrait::Trait::Resurrect(); + EXPECT_EQ(GetStaticSingleton(), static_singleton); + } + } + // The leaky singleton shouldn't leak since SingletonLeak has not been called. + VerifiesCallbacksNotCalled(); +} diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc new file mode 100644 index 0000000..fb09219 --- /dev/null +++ b/base/memory/weak_ptr.cc @@ -0,0 +1,81 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
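One caveat worth spelling out from the tests above: with StaticMemorySingletonTraits, New() returns NULL once the instance has been torn down, so callers of GetInstance() must tolerate a NULL result (sketch only; MyLogger is a hypothetical class using that trait):

  MyLogger* logger = MyLogger::GetInstance();
  if (logger)  // May be NULL after AtExit processing has run.
    logger->Write("late message");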
+ +#include "base/memory/weak_ptr.h" + +namespace base { +namespace internal { + +WeakReference::Flag::Flag(Flag** handle) : handle_(handle) { +} + +WeakReference::Flag::~Flag() { + if (handle_) + *handle_ = NULL; +} + +void WeakReference::Flag::AddRef() const { + DCHECK(CalledOnValidThread()); + RefCounted<Flag>::AddRef(); +} + +void WeakReference::Flag::Release() const { + DCHECK(CalledOnValidThread()); + RefCounted<Flag>::Release(); +} + +void WeakReference::Flag::Invalidate() { + DCHECK(CalledOnValidThread()); + handle_ = NULL; +} + +bool WeakReference::Flag::IsValid() const { + DCHECK(CalledOnValidThread()); + return handle_ != NULL; +} + +WeakReference::WeakReference() { +} + +WeakReference::WeakReference(Flag* flag) : flag_(flag) { +} + +WeakReference::~WeakReference() { +} + +bool WeakReference::is_valid() const { + return flag_ && flag_->IsValid(); +} + +WeakReferenceOwner::WeakReferenceOwner() : flag_(NULL) { +} + +WeakReferenceOwner::~WeakReferenceOwner() { + Invalidate(); +} + +WeakReference WeakReferenceOwner::GetRef() const { + if (!flag_) + flag_ = new WeakReference::Flag(&flag_); + return WeakReference(flag_); +} + +void WeakReferenceOwner::Invalidate() { + if (flag_) { + flag_->Invalidate(); + flag_ = NULL; + } +} + +WeakPtrBase::WeakPtrBase() { +} + +WeakPtrBase::~WeakPtrBase() { +} + +WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) { +} + +} // namespace internal +} // namespace base diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h new file mode 100644 index 0000000..edb9d93 --- /dev/null +++ b/base/memory/weak_ptr.h @@ -0,0 +1,246 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Weak pointers help in cases where you have many objects referring back to a +// shared object and you wish for the lifetime of the shared object to not be +// bound to the lifetime of the referrers. In other words, this is useful when +// reference counting is not a good fit. +// +// A common alternative to weak pointers is to have the shared object hold a +// list of all referrers, and then when the shared object is destroyed, it +// calls a method on the referrers to tell them to drop their references. This +// approach also requires the referrers to tell the shared object when they get +// destroyed so that the shared object can remove the referrer from its list of +// referrers. Such a solution works, but it is a bit complex. +// +// EXAMPLE: +// +// class Controller : public SupportsWeakPtr<Controller> { +// public: +// void SpawnWorker() { Worker::StartNew(AsWeakPtr()); } +// void WorkComplete(const Result& result) { ... } +// }; +// +// class Worker { +// public: +// static void StartNew(const WeakPtr<Controller>& controller) { +// Worker* worker = new Worker(controller); +// // Kick off asynchronous processing... +// } +// private: +// Worker(const WeakPtr<Controller>& controller) +// : controller_(controller) {} +// void DidCompleteAsynchronousProcessing(const Result& result) { +// if (controller_) +// controller_->WorkComplete(result); +// } +// WeakPtr<Controller> controller_; +// }; +// +// Given the above classes, a consumer may allocate a Controller object, call +// SpawnWorker several times, and then destroy the Controller object before all +// of the workers have completed. 
Because the Worker class only holds a weak +// pointer to the Controller, we don't have to worry about the Worker +// dereferencing the Controller back pointer after the Controller has been +// destroyed. +// +// WARNING: weak pointers are not threadsafe!!! You must only use a WeakPtr +// instance on thread where it was created. + +#ifndef BASE_MEMORY_WEAK_PTR_H_ +#define BASE_MEMORY_WEAK_PTR_H_ +#pragma once + +#include "base/base_api.h" +#include "base/logging.h" +#include "base/memory/ref_counted.h" +#include "base/threading/non_thread_safe.h" + +namespace base { + +namespace internal { +// These classes are part of the WeakPtr implementation. +// DO NOT USE THESE CLASSES DIRECTLY YOURSELF. + +class BASE_API WeakReference { + public: + class Flag : public RefCounted<Flag>, public base::NonThreadSafe { + public: + Flag(Flag** handle); + ~Flag(); + + void AddRef() const; + void Release() const; + void Invalidate(); + bool IsValid() const; + + void DetachFromThread() { base::NonThreadSafe::DetachFromThread(); } + + private: + Flag** handle_; + }; + + WeakReference(); + WeakReference(Flag* flag); + ~WeakReference(); + + bool is_valid() const; + + private: + scoped_refptr<Flag> flag_; +}; + +class BASE_API WeakReferenceOwner { + public: + WeakReferenceOwner(); + ~WeakReferenceOwner(); + + WeakReference GetRef() const; + + bool HasRefs() const { + return flag_ != NULL; + } + + void Invalidate(); + + // Indicates that this object will be used on another thread from now on. + void DetachFromThread() { + if (flag_) flag_->DetachFromThread(); + } + + private: + mutable WeakReference::Flag* flag_; +}; + +// This class simplifies the implementation of WeakPtr's type conversion +// constructor by avoiding the need for a public accessor for ref_. A +// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this +// base class gives us a way to access ref_ in a protected fashion. +class BASE_API WeakPtrBase { + public: + WeakPtrBase(); + ~WeakPtrBase(); + + protected: + WeakPtrBase(const WeakReference& ref); + + WeakReference ref_; +}; + +} // namespace internal + +template <typename T> class SupportsWeakPtr; +template <typename T> class WeakPtrFactory; + +// The WeakPtr class holds a weak reference to |T*|. +// +// This class is designed to be used like a normal pointer. You should always +// null-test an object of this class before using it or invoking a method that +// may result in the underlying object being destroyed. +// +// EXAMPLE: +// +// class Foo { ... }; +// WeakPtr<Foo> foo; +// if (foo) +// foo->method(); +// +template <typename T> +class WeakPtr : public internal::WeakPtrBase { + public: + WeakPtr() : ptr_(NULL) { + } + + // Allow conversion from U to T provided U "is a" T. + template <typename U> + WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.get()) { + } + + T* get() const { return ref_.is_valid() ? ptr_ : NULL; } + operator T*() const { return get(); } + + T* operator*() const { + DCHECK(get() != NULL); + return *get(); + } + T* operator->() const { + DCHECK(get() != NULL); + return get(); + } + + void reset() { + ref_ = internal::WeakReference(); + ptr_ = NULL; + } + + private: + friend class SupportsWeakPtr<T>; + friend class WeakPtrFactory<T>; + + WeakPtr(const internal::WeakReference& ref, T* ptr) + : WeakPtrBase(ref), ptr_(ptr) { + } + + // This pointer is only valid when ref_.is_valid() is true. Otherwise, its + // value is undefined (as opposed to NULL). 
+ T* ptr_; +}; + +// A class may extend from SupportsWeakPtr to expose weak pointers to itself. +// This is useful in cases where you want others to be able to get a weak +// pointer to your class. It also has the property that you don't need to +// initialize it from your constructor. +template <class T> +class SupportsWeakPtr { + public: + SupportsWeakPtr() {} + + WeakPtr<T> AsWeakPtr() { + return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this)); + } + + // Indicates that this object will be used on another thread from now on. + void DetachFromThread() { + weak_reference_owner_.DetachFromThread(); + } + + private: + internal::WeakReferenceOwner weak_reference_owner_; + DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr); +}; + +// A class may alternatively be composed of a WeakPtrFactory and thereby +// control how it exposes weak pointers to itself. This is helpful if you only +// need weak pointers within the implementation of a class. This class is also +// useful when working with primitive types. For example, you could have a +// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool. +template <class T> +class WeakPtrFactory { + public: + explicit WeakPtrFactory(T* ptr) : ptr_(ptr) { + } + + WeakPtr<T> GetWeakPtr() { + return WeakPtr<T>(weak_reference_owner_.GetRef(), ptr_); + } + + // Call this method to invalidate all existing weak pointers. + void InvalidateWeakPtrs() { + weak_reference_owner_.Invalidate(); + } + + // Call this method to determine if any weak pointers exist. + bool HasWeakPtrs() const { + return weak_reference_owner_.HasRefs(); + } + + private: + internal::WeakReferenceOwner weak_reference_owner_; + T* ptr_; + DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory); +}; + +} // namespace base + +#endif // BASE_MEMORY_WEAK_PTR_H_ diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc new file mode 100644 index 0000000..6c2a7e8 --- /dev/null +++ b/base/memory/weak_ptr_unittest.cc @@ -0,0 +1,137 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/scoped_ptr.h" +#include "base/memory/weak_ptr.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "base/message_loop.h" +#include "base/threading/thread.h" + +namespace base { +namespace { + +template <class T> +class OffThreadObjectCreator { + public: + static T* NewObject() { + T* result; + { + Thread creator_thread("creator_thread"); + creator_thread.Start(); + creator_thread.message_loop()->PostTask( + FROM_HERE, + NewRunnableFunction(OffThreadObjectCreator::CreateObject, &result)); + } + DCHECK(result); // We synchronized on thread destruction above. 
+ return result; + } + private: + static void CreateObject(T** result) { + *result = new T; + } +}; + +struct Base {}; +struct Derived : Base {}; + +struct Producer : SupportsWeakPtr<Producer> {}; +struct Consumer { WeakPtr<Producer> producer; }; + +} // namespace + +TEST(WeakPtrTest, Basic) { + int data; + WeakPtrFactory<int> factory(&data); + WeakPtr<int> ptr = factory.GetWeakPtr(); + EXPECT_EQ(&data, ptr.get()); +} + +TEST(WeakPtrTest, Comparison) { + int data; + WeakPtrFactory<int> factory(&data); + WeakPtr<int> ptr = factory.GetWeakPtr(); + WeakPtr<int> ptr2 = ptr; + EXPECT_TRUE(ptr == ptr2); +} + +TEST(WeakPtrTest, OutOfScope) { + WeakPtr<int> ptr; + EXPECT_TRUE(ptr.get() == NULL); + { + int data; + WeakPtrFactory<int> factory(&data); + ptr = factory.GetWeakPtr(); + } + EXPECT_TRUE(ptr.get() == NULL); +} + +TEST(WeakPtrTest, Multiple) { + WeakPtr<int> a, b; + { + int data; + WeakPtrFactory<int> factory(&data); + a = factory.GetWeakPtr(); + b = factory.GetWeakPtr(); + EXPECT_EQ(&data, a.get()); + EXPECT_EQ(&data, b.get()); + } + EXPECT_TRUE(a.get() == NULL); + EXPECT_TRUE(b.get() == NULL); +} + +TEST(WeakPtrTest, UpCast) { + Derived data; + WeakPtrFactory<Derived> factory(&data); + WeakPtr<Base> ptr = factory.GetWeakPtr(); + ptr = factory.GetWeakPtr(); + EXPECT_EQ(ptr.get(), &data); +} + +TEST(WeakPtrTest, SupportsWeakPtr) { + Producer f; + WeakPtr<Producer> ptr = f.AsWeakPtr(); + EXPECT_EQ(&f, ptr.get()); +} + +TEST(WeakPtrTest, InvalidateWeakPtrs) { + int data; + WeakPtrFactory<int> factory(&data); + WeakPtr<int> ptr = factory.GetWeakPtr(); + EXPECT_EQ(&data, ptr.get()); + EXPECT_TRUE(factory.HasWeakPtrs()); + factory.InvalidateWeakPtrs(); + EXPECT_TRUE(ptr.get() == NULL); + EXPECT_FALSE(factory.HasWeakPtrs()); +} + +TEST(WeakPtrTest, HasWeakPtrs) { + int data; + WeakPtrFactory<int> factory(&data); + { + WeakPtr<int> ptr = factory.GetWeakPtr(); + EXPECT_TRUE(factory.HasWeakPtrs()); + } + EXPECT_FALSE(factory.HasWeakPtrs()); +} + +TEST(WeakPtrTest, SingleThreaded1) { + // Test that it is OK to create a class that supports weak references on one + // thread, but use it on another. This tests that we do not trip runtime + // checks that ensure that a weak reference is not used by multiple threads. + scoped_ptr<Producer> producer(OffThreadObjectCreator<Producer>::NewObject()); + WeakPtr<Producer> weak_producer = producer->AsWeakPtr(); + EXPECT_EQ(producer.get(), weak_producer.get()); +} + +TEST(WeakPtrTest, SingleThreaded2) { + // Test that it is OK to create a class that has a WeakPtr member on one + // thread, but use it on another. This tests that we do not trip runtime + // checks that ensure that a weak reference is not used by multiple threads. + scoped_ptr<Consumer> consumer(OffThreadObjectCreator<Consumer>::NewObject()); + Producer producer; + consumer->producer = producer.AsWeakPtr(); + EXPECT_EQ(&producer, consumer->producer.get()); +} + +} // namespace base |
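Finally, an illustrative sketch of the WeakPtrFactory pattern exercised by the tests above, for a class that hands out weak pointers to itself and revokes them before destruction (Downloader and Cancel are hypothetical; this snippet is not part of the diff):

  class Downloader {
   public:
    Downloader() : weak_factory_(this) {}
    ~Downloader() { weak_factory_.InvalidateWeakPtrs(); }

    base::WeakPtr<Downloader> AsWeakPtr() {
      return weak_factory_.GetWeakPtr();
    }
    void Cancel() {}

   private:
    base::WeakPtrFactory<Downloader> weak_factory_;
  };

  // A consumer holds a WeakPtr and must null-check before use:
  //   base::WeakPtr<Downloader> weak = downloader->AsWeakPtr();
  //   if (weak)
  //     weak->Cancel();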