diff options
Diffstat (limited to 'base')
-rw-r--r-- | base/waitable_event.h | 38 | ||||
-rw-r--r-- | base/waitable_event_posix.cc | 82 | ||||
-rw-r--r-- | base/waitable_event_watcher.h | 6 | ||||
-rw-r--r-- | base/waitable_event_watcher_posix.cc | 24 | ||||
-rw-r--r-- | base/waitable_event_watcher_unittest.cc | 22 |
5 files changed, 108 insertions, 64 deletions
diff --git a/base/waitable_event.h b/base/waitable_event.h index 73fc7ff5..d2b0999 100644 --- a/base/waitable_event.h +++ b/base/waitable_event.h @@ -16,6 +16,7 @@ #include <utility> #include "base/condition_variable.h" #include "base/lock.h" +#include "base/ref_counted.h" #endif #include "base/message_loop.h" @@ -60,8 +61,6 @@ class WaitableEvent { HANDLE Release(); #endif - // WARNING: Destroying a WaitableEvent while threads are waiting on it is not - // supported. Doing so will cause crashes or other instability. ~WaitableEvent(); // Put the event in the un-signaled state. @@ -93,6 +92,9 @@ class WaitableEvent { // count: the number of elements in @waitables // // returns: the index of a WaitableEvent which has been signaled. + // + // You MUST NOT delete any of the WaitableEvent objects while this wait is + // happening. static size_t WaitMany(WaitableEvent** waitables, size_t count); // For asynchronous waiting, see WaitableEventWatcher @@ -130,10 +132,35 @@ class WaitableEvent { #if defined(OS_WIN) HANDLE handle_; #else + // On Windows, one can close a HANDLE which is currently being waited on. The + // MSDN documentation says that the resulting behaviour is 'undefined', but + // it doesn't crash. However, if we were to include the following members + // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an + // event which gets deleted. This mismatch has bitten us several times now, + // so we have a kernel of the WaitableEvent, which is reference counted. + // WaitableEventWatchers may then take a reference and thus match the Windows + // behaviour. 
+ struct WaitableEventKernel : + public RefCountedThreadSafe<WaitableEventKernel> { + public: + WaitableEventKernel(bool manual_reset, bool initially_signaled) + : manual_reset_(manual_reset), + signaled_(initially_signaled) { + } + + bool Dequeue(Waiter* waiter, void* tag); + + Lock lock_; + bool signaled_; + const bool manual_reset_; + std::list<Waiter*> waiters_; + }; + + scoped_refptr<WaitableEventKernel> kernel_; + bool SignalAll(); bool SignalOne(); void Enqueue(Waiter* waiter); - bool Dequeue(Waiter* waiter, void* tag); // When dealing with arrays of WaitableEvent*, we want to sort by the address // of the WaitableEvent in order to have a globally consistent locking order. @@ -143,11 +170,6 @@ class WaitableEvent { typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex; static size_t EnqueueMany(WaiterAndIndex* waitables, size_t count, Waiter* waiter); - - Lock lock_; - bool signaled_; - const bool manual_reset_; - std::list<Waiter*> waiters_; #endif DISALLOW_COPY_AND_ASSIGN(WaitableEvent); diff --git a/base/waitable_event_posix.cc b/base/waitable_event_posix.cc index 09e2868..690c755 100644 --- a/base/waitable_event_posix.cc +++ b/base/waitable_event_posix.cc @@ -35,46 +35,40 @@ namespace base { // This is just an abstract base class for waking the two types of waiters // ----------------------------------------------------------------------------- WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled) - : signaled_(initially_signaled), - manual_reset_(manual_reset) { + : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) { } WaitableEvent::~WaitableEvent() { - if (!waiters_.empty()) { - LOG(ERROR) << "Destroying a WaitableEvent (" << this << ") with " - << waiters_.size() << " waiters"; - NOTREACHED() << "Aborting."; - } } void WaitableEvent::Reset() { - AutoLock locked(lock_); - signaled_ = false; + AutoLock locked(kernel_->lock_); + kernel_->signaled_ = false; } void WaitableEvent::Signal() { - AutoLock locked(lock_); 
+ AutoLock locked(kernel_->lock_); - if (signaled_) + if (kernel_->signaled_) return; - if (manual_reset_) { + if (kernel_->manual_reset_) { SignalAll(); - signaled_ = true; + kernel_->signaled_ = true; } else { // In the case of auto reset, if no waiters were woken, we remain // signaled. if (!SignalOne()) - signaled_ = true; + kernel_->signaled_ = true; } } bool WaitableEvent::IsSignaled() { - AutoLock locked(lock_); + AutoLock locked(kernel_->lock_); - const bool result = signaled_; - if (result && !manual_reset_) - signaled_ = false; + const bool result = kernel_->signaled_; + if (result && !kernel_->manual_reset_) + kernel_->signaled_ = false; return result; } @@ -150,15 +144,15 @@ bool WaitableEvent::TimedWait(const TimeDelta& max_time) { const Time end_time(Time::Now() + max_time); const bool finite_time = max_time.ToInternalValue() >= 0; - lock_.Acquire(); - if (signaled_) { - if (!manual_reset_) { + kernel_->lock_.Acquire(); + if (kernel_->signaled_) { + if (!kernel_->manual_reset_) { // In this case we were signaled when we had no waiters. Now that // someone has waited upon us, we can automatically reset. - signaled_ = false; + kernel_->signaled_ = false; } - lock_.Release(); + kernel_->lock_.Release(); return true; } @@ -168,7 +162,7 @@ bool WaitableEvent::TimedWait(const TimeDelta& max_time) { SyncWaiter sw(&cv, &lock); Enqueue(&sw); - lock_.Release(); + kernel_->lock_.Release(); // We are violating locking order here by holding the SyncWaiter lock but not // the WaitableEvent lock. However, this is safe because we don't lock @lock_ // again before unlocking it. 
@@ -187,9 +181,9 @@ bool WaitableEvent::TimedWait(const TimeDelta& max_time) { sw.Disable(); lock.Release(); - lock_.Acquire(); - Dequeue(&sw, &sw); - lock_.Release(); + kernel_->lock_.Acquire(); + kernel_->Dequeue(&sw, &sw); + kernel_->lock_.Release(); return return_value; } @@ -261,7 +255,7 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, lock.Acquire(); // Release the WaitableEvent locks in the reverse order for (size_t i = 0; i < count; ++i) { - waitables[count - (1 + i)].first->lock_.Release(); + waitables[count - (1 + i)].first->kernel_->lock_.Release(); } for (;;) { @@ -281,12 +275,12 @@ size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, // remove our SyncWaiter from the wait-list for (size_t i = 0; i < count; ++i) { if (raw_waitables[i] != signaled_event) { - raw_waitables[i]->lock_.Acquire(); + raw_waitables[i]->kernel_->lock_.Acquire(); // There's no possible ABA issue with the address of the SyncWaiter here // because it lives on the stack. Thus the tag value is just the pointer // value again. 
- raw_waitables[i]->Dequeue(&sw, &sw); - raw_waitables[i]->lock_.Release(); + raw_waitables[i]->kernel_->Dequeue(&sw, &sw); + raw_waitables[i]->kernel_->lock_.Release(); } else { signaled_index = i; } @@ -312,17 +306,17 @@ size_t WaitableEvent::EnqueueMany if (!count) return 0; - waitables[0].first->lock_.Acquire(); - if (waitables[0].first->signaled_) { - if (!waitables[0].first->manual_reset_) - waitables[0].first->signaled_ = false; - waitables[0].first->lock_.Release(); + waitables[0].first->kernel_->lock_.Acquire(); + if (waitables[0].first->kernel_->signaled_) { + if (!waitables[0].first->kernel_->manual_reset_) + waitables[0].first->kernel_->signaled_ = false; + waitables[0].first->kernel_->lock_.Release(); return count; } const size_t r = EnqueueMany(waitables + 1, count - 1, waiter); if (r) { - waitables[0].first->lock_.Release(); + waitables[0].first->kernel_->lock_.Release(); } else { waitables[0].first->Enqueue(waiter); } @@ -343,12 +337,12 @@ bool WaitableEvent::SignalAll() { bool signaled_at_least_one = false; for (std::list<Waiter*>::iterator - i = waiters_.begin(); i != waiters_.end(); ++i) { + i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) { if ((*i)->Fire(this)) signaled_at_least_one = true; } - waiters_.clear(); + kernel_->waiters_.clear(); return signaled_at_least_one; } @@ -358,11 +352,11 @@ bool WaitableEvent::SignalAll() { // --------------------------------------------------------------------------- bool WaitableEvent::SignalOne() { for (;;) { - if (waiters_.empty()) + if (kernel_->waiters_.empty()) return false; - const bool r = (*waiters_.begin())->Fire(this); - waiters_.pop_front(); + const bool r = (*kernel_->waiters_.begin())->Fire(this); + kernel_->waiters_.pop_front(); if (r) return true; } @@ -372,14 +366,14 @@ bool WaitableEvent::SignalOne() { // Add a waiter to the list of those waiting. Called with lock held. 
// ----------------------------------------------------------------------------- void WaitableEvent::Enqueue(Waiter* waiter) { - waiters_.push_back(waiter); + kernel_->waiters_.push_back(waiter); } // ----------------------------------------------------------------------------- // Remove a waiter from the list of those waiting. Return true if the waiter was // actually removed. Called with lock held. // ----------------------------------------------------------------------------- -bool WaitableEvent::Dequeue(Waiter* waiter, void* tag) { +bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) { for (std::list<Waiter*>::iterator i = waiters_.begin(); i != waiters_.end(); ++i) { if (*i == waiter && (*i)->Compare(tag)) { diff --git a/base/waitable_event_watcher.h b/base/waitable_event_watcher.h index 1b49944..94c3fdb 100644 --- a/base/waitable_event_watcher.h +++ b/base/waitable_event_watcher.h @@ -11,11 +11,11 @@ #include "base/object_watcher.h" #else #include "base/message_loop.h" +#include "base/waitable_event.h" #endif namespace base { -class WaitableEvent; class Flag; class AsyncWaiter; class AsyncCallbackTask; @@ -51,6 +51,9 @@ class AsyncCallbackTask; // occurs just before a WaitableEventWatcher is deleted. There is currently no // safe way to stop watching an automatic reset WaitableEvent without possibly // missing a signal. +// +// NOTE: you /are/ allowed to delete the WaitableEvent while still waiting on +// it with a Watcher. It will act as if the event was never signaled. 
// ----------------------------------------------------------------------------- class WaitableEventWatcher @@ -140,6 +143,7 @@ class WaitableEventWatcher scoped_refptr<Flag> cancel_flag_; AsyncWaiter* waiter_; AsyncCallbackTask* callback_task_; + scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_; #endif }; diff --git a/base/waitable_event_watcher_posix.cc b/base/waitable_event_watcher_posix.cc index 10d47b7..1910c5f 100644 --- a/base/waitable_event_watcher_posix.cc +++ b/base/waitable_event_watcher_posix.cc @@ -155,12 +155,13 @@ bool WaitableEventWatcher::StartWatching cancel_flag_ = new Flag; callback_task_ = new AsyncCallbackTask(cancel_flag_, delegate, event); + WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get(); - AutoLock locked(event->lock_); + AutoLock locked(kernel->lock_); - if (event->signaled_) { - if (!event->manual_reset_) - event->signaled_ = false; + if (kernel->signaled_) { + if (!kernel->manual_reset_) + kernel->signaled_ = false; // No hairpinning - we can't call the delegate directly here. We have to // enqueue a task on the MessageLoop as normal. @@ -172,6 +173,7 @@ bool WaitableEventWatcher::StartWatching current_ml->AddDestructionObserver(this); event_ = event; + kernel_ = kernel; waiter_ = new AsyncWaiter(current_ml, callback_task_, cancel_flag_); event->Enqueue(waiter_); @@ -194,9 +196,9 @@ void WaitableEventWatcher::StopWatching() { return; } - if (!event_) { - // We have no WaitableEvent. This means that we never enqueued a Waiter on - // an event because the event was already signaled when StartWatching was + if (!kernel_.get()) { + // We have no kernel. This means that we never enqueued a Waiter on an + // event because the event was already signaled when StartWatching was // called. // // In this case, a task was enqueued on the MessageLoop and will run. @@ -208,9 +210,9 @@ void WaitableEventWatcher::StopWatching() { return; } - AutoLock locked(event_->lock_); - // We have a lock on the WaitableEvent. 
No one else can signal the event while - we have it. + AutoLock locked(kernel_->lock_); + // We have a lock on the kernel. No one else can signal the event while we + // have it. // We have a possible ABA issue here. If Dequeue was to compare only the // pointer values then it's possible that the AsyncWaiter could have been @@ -224,7 +226,7 @@ // have a reference to the Flag, its memory cannot be reused while this object // still exists. So if we find a waiter with the correct pointer value, and // which shares a Flag pointer, we have a real match. - if (event_->Dequeue(waiter_, cancel_flag_.get())) { + if (kernel_->Dequeue(waiter_, cancel_flag_.get())) { // Case 2: the waiter hasn't been signaled yet; it was still on the wait // list. We've removed it, thus we can delete it and the task (which cannot // have been enqueued with the MessageLoop because the waiter was never diff --git a/base/waitable_event_watcher_unittest.cc b/base/waitable_event_watcher_unittest.cc index 44b9b89..049de38 100644 --- a/base/waitable_event_watcher_unittest.cc +++ b/base/waitable_event_watcher_unittest.cc @@ -107,6 +107,22 @@ void RunTest_OutlivesMessageLoop(MessageLoop::Type message_loop_type) { } } +void RunTest_DeleteUnder(MessageLoop::Type message_loop_type) { + // Delete the WaitableEvent out from under the Watcher. This is explicitly + // allowed by the interface. 
+ + MessageLoop message_loop(message_loop_type); + + { + WaitableEventWatcher watcher; + + WaitableEvent* event = new WaitableEvent(false, false); + QuitDelegate delegate; + watcher.StartWatching(event, &delegate); + delete event; + } +} + } // namespace //----------------------------------------------------------------------------- @@ -134,3 +150,9 @@ TEST(WaitableEventWatcherTest, OutlivesMessageLoop) { RunTest_OutlivesMessageLoop(MessageLoop::TYPE_IO); RunTest_OutlivesMessageLoop(MessageLoop::TYPE_UI); } + +TEST(WaitableEventWatcherTest, DeleteUnder) { + RunTest_DeleteUnder(MessageLoop::TYPE_DEFAULT); + RunTest_DeleteUnder(MessageLoop::TYPE_IO); + RunTest_DeleteUnder(MessageLoop::TYPE_UI); +} |