-rw-r--r--  net/disk_cache/entry_impl.cc                       50
-rw-r--r--  net/disk_cache/simple/simple_backend_impl.cc        8
-rw-r--r--  net/disk_cache/simple/simple_entry_impl.cc         20
-rw-r--r--  net/disk_cache/sparse_control.cc                   14
-rw-r--r--  net/dns/host_resolver_impl.cc                      18
-rw-r--r--  net/http/http_cache_transaction.cc                 36
-rw-r--r--  net/http/http_proxy_client_socket_pool.cc           2
-rw-r--r--  net/http/http_transaction_unittest.cc               2
-rw-r--r--  net/socket/socket_test_util.cc                      7
-rw-r--r--  net/spdy/spdy_http_stream.cc                       24
-rw-r--r--  net/spdy/spdy_proxy_client_socket.cc               12
-rw-r--r--  net/spdy/spdy_session.cc                           24
-rw-r--r--  net/spdy/spdy_session_spdy3_unittest.cc             6
-rw-r--r--  net/spdy/spdy_stream.cc                             4
-rw-r--r--  net/spdy/spdy_stream_test_util.cc                   6
-rw-r--r--  net/spdy/spdy_stream_test_util.h                    4
-rw-r--r--  net/spdy/spdy_websocket_stream.cc                  12
-rw-r--r--  net/spdy/spdy_websocket_stream_spdy2_unittest.cc    2
-rw-r--r--  net/spdy/spdy_websocket_stream_spdy3_unittest.cc    2
-rw-r--r--  net/spdy/spdy_write_queue.cc                       14
20 files changed, 134 insertions, 133 deletions
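
The change is mechanical throughout: implicit boolean tests and comparisons on scoped_refptr<T> and base::WeakPtr<T> members, such as "if (backend_)" or "DCHECK(stream_)", become explicit null checks on the underlying raw pointer via .get(), such as "if (backend_.get())". The sketch below illustrates the two styles; RefPtr is a simplified, hypothetical stand-in for Chromium's scoped_refptr (which is reference counted and, at the time, exposed an implicit conversion to T* that these call sites relied on), not the real class.

// Minimal sketch of the pattern applied in this diff. RefPtr is a
// simplified stand-in for base::scoped_refptr, not the real class.
#include <cstdio>

template <typename T>
class RefPtr {
 public:
  explicit RefPtr(T* p) : ptr_(p) {}
  T* get() const { return ptr_; }
  T* operator->() const { return ptr_; }
  operator T*() const { return ptr_; }  // implicit conversion old call sites relied on
 private:
  T* ptr_;
};

struct Backend {
  void BufferDeleted(int bytes) { std::printf("freed %d bytes\n", bytes); }
};

int main() {
  RefPtr<Backend> backend(new Backend);

  if (backend)        // old style: relies on the implicit conversion to T*
    backend->BufferDeleted(16);

  if (backend.get())  // new style used throughout this diff: explicit null check
    backend->BufferDeleted(16);

  delete backend.get();  // sketch only; a real scoped_refptr manages the lifetime
  return 0;
}
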
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 185a5a1..205a2ddb 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -95,7 +95,7 @@ class EntryImpl::UserBuffer {
buffer_.reserve(kMaxBlockSize);
}
~UserBuffer() {
- if (backend_)
+ if (backend_.get())
backend_->BufferDeleted(capacity() - kMaxBlockSize);
}
@@ -252,7 +252,7 @@ int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
void EntryImpl::UserBuffer::Reset() {
if (!grow_allowed_) {
- if (backend_)
+ if (backend_.get())
backend_->BufferDeleted(capacity() - kMaxBlockSize);
grow_allowed_ = true;
std::vector<char> tmp;
@@ -272,7 +272,7 @@ bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
if (required > limit)
return false;
- if (!backend_)
+ if (!backend_.get())
return false;
int to_add = std::max(required - current_size, kMaxBlockSize * 4);
@@ -302,7 +302,7 @@ EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
}
void EntryImpl::DoomImpl() {
- if (doomed_ || !backend_)
+ if (doomed_ || !backend_.get())
return;
SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
@@ -672,7 +672,7 @@ void EntryImpl::IncrementIoCount() {
}
void EntryImpl::DecrementIoCount() {
- if (backend_)
+ if (backend_.get())
backend_->DecrementIoCount();
}
@@ -688,7 +688,7 @@ void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
}
void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
- if (!backend_)
+ if (!backend_.get())
return;
switch (op) {
@@ -746,12 +746,12 @@ int EntryImpl::NumBlocksForEntry(int key_size) {
// ------------------------------------------------------------------------
void EntryImpl::Doom() {
- if (background_queue_)
+ if (background_queue_.get())
background_queue_->DoomEntryImpl(this);
}
void EntryImpl::Close() {
- if (background_queue_)
+ if (background_queue_.get())
background_queue_->CloseEntryImpl(this);
}
@@ -821,7 +821,7 @@ int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
if (buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
@@ -840,7 +840,7 @@ int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
if (offset < 0 || buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
@@ -853,7 +853,7 @@ int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
if (callback.is_null())
return ReadSparseDataImpl(offset, buf, buf_len, callback);
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
@@ -865,7 +865,7 @@ int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
if (callback.is_null())
return WriteSparseDataImpl(offset, buf, buf_len, callback);
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
@@ -874,7 +874,7 @@ int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
const CompletionCallback& callback) {
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->GetAvailableRange(this, offset, len, start, callback);
@@ -891,7 +891,7 @@ bool EntryImpl::CouldBeSparse() const {
}
void EntryImpl::CancelSparseIO() {
- if (background_queue_)
+ if (background_queue_.get())
background_queue_->CancelSparseIO(this);
}
@@ -899,7 +899,7 @@ int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
if (!sparse_.get())
return net::OK;
- if (!background_queue_)
+ if (!background_queue_.get())
return net::ERR_UNEXPECTED;
background_queue_->ReadyForSparseIO(this, callback);
@@ -913,7 +913,7 @@ int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
// data related to a previous cache entry because the range was not fully
// written before).
EntryImpl::~EntryImpl() {
- if (!backend_) {
+ if (!backend_.get()) {
entry_.clear_modified();
node_.clear_modified();
return;
@@ -981,7 +981,7 @@ int EntryImpl::InternalReadData(int index, int offset,
if (buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!backend_)
+ if (!backend_.get())
return net::ERR_UNEXPECTED;
TimeTicks start = TimeTicks::Now();
@@ -1063,7 +1063,7 @@ int EntryImpl::InternalWriteData(int index, int offset,
if (offset < 0 || buf_len < 0)
return net::ERR_INVALID_ARGUMENT;
- if (!backend_)
+ if (!backend_.get())
return net::ERR_UNEXPECTED;
int max_file_size = backend_->MaxFileSize();
@@ -1171,7 +1171,7 @@ bool EntryImpl::CreateDataBlock(int index, int size) {
bool EntryImpl::CreateBlock(int size, Addr* address) {
DCHECK(!address->is_initialized());
- if (!backend_)
+ if (!backend_.get())
return false;
FileType file_type = Addr::RequiredFileType(size);
@@ -1196,7 +1196,7 @@ bool EntryImpl::CreateBlock(int size, Addr* address) {
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
- DCHECK(backend_);
+ DCHECK(backend_.get());
if (!address.is_initialized())
return;
if (address.is_separate_file()) {
@@ -1214,7 +1214,7 @@ void EntryImpl::DeleteData(Addr address, int index) {
}
void EntryImpl::UpdateRank(bool modified) {
- if (!backend_)
+ if (!backend_.get())
return;
if (!doomed_) {
@@ -1231,7 +1231,7 @@ void EntryImpl::UpdateRank(bool modified) {
}
File* EntryImpl::GetBackingFile(Addr address, int index) {
- if (!backend_)
+ if (!backend_.get())
return NULL;
File* file;
@@ -1288,7 +1288,7 @@ bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
}
if (!user_buffers_[index].get())
- user_buffers_[index].reset(new UserBuffer(backend_));
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
return PrepareBuffer(index, offset, buf_len);
}
@@ -1359,7 +1359,7 @@ bool EntryImpl::CopyToLocalBuffer(int index) {
DCHECK(address.is_initialized());
int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
- user_buffers_[index].reset(new UserBuffer(backend_));
+ user_buffers_[index].reset(new UserBuffer(backend_.get()));
user_buffers_[index]->Write(len, NULL, 0);
File* file = GetBackingFile(address, index);
@@ -1505,7 +1505,7 @@ uint32 EntryImpl::GetEntryFlags() {
}
void EntryImpl::GetData(int index, char** buffer, Addr* address) {
- DCHECK(backend_);
+ DCHECK(backend_.get());
if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
!user_buffers_[index]->Start()) {
// The data is already in memory, just copy it and we're done.
diff --git a/net/disk_cache/simple/simple_backend_impl.cc b/net/disk_cache/simple/simple_backend_impl.cc
index 009076f..7012276 100644
--- a/net/disk_cache/simple/simple_backend_impl.cc
+++ b/net/disk_cache/simple/simple_backend_impl.cc
@@ -233,7 +233,7 @@ void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
EntryMap::iterator it = active_entries_.find(entry_hash);
if (it == active_entries_.end())
continue;
- SimpleEntryImpl* entry = it->second;
+ SimpleEntryImpl* entry = it->second.get();
entry->Doom();
(*removed_key_hashes)[i] = removed_key_hashes->back();
@@ -330,12 +330,12 @@ scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
base::WeakPtr<SimpleEntryImpl>()));
EntryMap::iterator& it = insert_result.first;
if (insert_result.second)
- DCHECK(!it->second);
- if (!it->second) {
+ DCHECK(!it->second.get());
+ if (!it->second.get()) {
SimpleEntryImpl* entry = new SimpleEntryImpl(this, path_, key, entry_hash);
it->second = entry->AsWeakPtr();
}
- DCHECK(it->second);
+ DCHECK(it->second.get());
// It's possible, but unlikely, that we have an entry hash collision with a
// currently active entry.
if (key != it->second->key()) {
diff --git a/net/disk_cache/simple/simple_entry_impl.cc b/net/disk_cache/simple/simple_entry_impl.cc
index 8ed14be..054180c 100644
--- a/net/disk_cache/simple/simple_entry_impl.cc
+++ b/net/disk_cache/simple/simple_entry_impl.cc
@@ -117,7 +117,7 @@ SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
int SimpleEntryImpl::OpenEntry(Entry** out_entry,
const CompletionCallback& callback) {
- DCHECK(backend_);
+ DCHECK(backend_.get());
// This enumeration is used in histograms, add entries only at end.
enum OpenEntryIndexEnum {
INDEX_NOEXIST = 0,
@@ -126,7 +126,7 @@ int SimpleEntryImpl::OpenEntry(Entry** out_entry,
INDEX_MAX = 3,
};
OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
- if (backend_) {
+ if (backend_.get()) {
if (backend_->index()->Has(key_))
open_entry_index_enum = INDEX_HIT;
else
@@ -147,7 +147,7 @@ int SimpleEntryImpl::OpenEntry(Entry** out_entry,
int SimpleEntryImpl::CreateEntry(Entry** out_entry,
const CompletionCallback& callback) {
- DCHECK(backend_);
+ DCHECK(backend_.get());
int ret_value = net::ERR_FAILED;
if (state_ == STATE_UNINITIALIZED &&
pending_operations_.size() == 0) {
@@ -171,7 +171,7 @@ int SimpleEntryImpl::CreateEntry(Entry** out_entry,
// have the entry in the index but we don't have the created files yet, this
// way we never leak files. CreationOperationComplete will remove the entry
// from the index if the creation fails.
- if (backend_)
+ if (backend_.get())
backend_->index()->Insert(key_);
RunNextOperationIfNeeded();
@@ -274,7 +274,7 @@ int SimpleEntryImpl::WriteData(int stream_index,
RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
return net::ERR_INVALID_ARGUMENT;
}
- if (backend_ && offset + buf_len > backend_->GetMaxFileSize()) {
+ if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
return net::ERR_FAILED;
}
@@ -385,14 +385,14 @@ void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
}
void SimpleEntryImpl::RemoveSelfFromBackend() {
- if (!backend_)
+ if (!backend_.get())
return;
backend_->OnDeactivated(this);
backend_.reset();
}
void SimpleEntryImpl::MarkAsDoomed() {
- if (!backend_)
+ if (!backend_.get())
return;
backend_->index()->Remove(key_);
RemoveSelfFromBackend();
@@ -543,7 +543,7 @@ void SimpleEntryImpl::ReadDataInternal(int stream_index,
buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);
state_ = STATE_IO_PENDING;
- if (backend_)
+ if (backend_.get())
backend_->index()->UseIfExists(key_);
scoped_ptr<uint32> read_crc32(new uint32());
@@ -579,7 +579,7 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,
}
DCHECK_EQ(STATE_READY, state_);
state_ = STATE_IO_PENDING;
- if (backend_)
+ if (backend_.get())
backend_->index()->UseIfExists(key_);
// It is easy to incrementally compute the CRC from [0 .. |offset + buf_len|)
// if |offset == 0| or we have already computed the CRC for [0 .. offset).
@@ -786,7 +786,7 @@ void SimpleEntryImpl::SetSynchronousData() {
last_modified_ = synchronous_entry_->last_modified();
for (int i = 0; i < kSimpleEntryFileCount; ++i)
data_size_[i] = synchronous_entry_->data_size(i);
- if (backend_)
+ if (backend_.get())
backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}
diff --git a/net/disk_cache/sparse_control.cc b/net/disk_cache/sparse_control.cc
index 9f13678..22b96c6 100644
--- a/net/disk_cache/sparse_control.cc
+++ b/net/disk_cache/sparse_control.cc
@@ -104,7 +104,7 @@ void ChildrenDeleter::Start(char* buffer, int len) {
void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
DCHECK(address.is_block_file());
- if (!backend_)
+ if (!backend_.get())
return Release();
disk_cache::File* file(backend_->File(address));
@@ -127,7 +127,7 @@ void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
void ChildrenDeleter::DeleteChildren() {
int child_id = 0;
- if (!children_map_.FindNextSetBit(&child_id) || !backend_) {
+ if (!children_map_.FindNextSetBit(&child_id) || !backend_.get()) {
// We are done. Just delete this object.
return Release();
}
@@ -350,9 +350,9 @@ void SparseControl::DeleteChildren(EntryImpl* entry) {
entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN);
- DCHECK(entry->backend_);
- ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_,
- entry->GetKey());
+ DCHECK(entry->backend_.get());
+ ChildrenDeleter* deleter =
+ new ChildrenDeleter(entry->backend_.get(), entry->GetKey());
// The object will self destruct when finished.
deleter->AddRef();
@@ -461,7 +461,7 @@ bool SparseControl::OpenChild() {
if (!ChildPresent())
return ContinueWithoutChild(key);
- if (!entry_->backend_)
+ if (!entry_->backend_.get())
return false;
child_ = entry_->backend_->OpenEntryImpl(key);
@@ -539,7 +539,7 @@ bool SparseControl::ContinueWithoutChild(const std::string& key) {
if (kGetRangeOperation == operation_)
return true;
- if (!entry_->backend_)
+ if (!entry_->backend_.get())
return false;
child_ = entry_->backend_->CreateEntryImpl(key);
diff --git a/net/dns/host_resolver_impl.cc b/net/dns/host_resolver_impl.cc
index 7845e0b..844c1a7 100644
--- a/net/dns/host_resolver_impl.cc
+++ b/net/dns/host_resolver_impl.cc
@@ -917,7 +917,7 @@ class HostResolverImpl::IPv6ProbeJob {
: resolver_(resolver),
net_log_(BoundNetLog::Make(net_log, NetLog::SOURCE_IPV6_PROBE_JOB)),
result_(false, IPV6_SUPPORT_MAX, OK) {
- DCHECK(resolver);
+ DCHECK(resolver.get());
net_log_.BeginEvent(NetLog::TYPE_IPV6_PROBE_RUNNING);
const bool kIsSlow = true;
base::WorkerPool::PostTaskAndReply(
@@ -939,7 +939,7 @@ class HostResolverImpl::IPv6ProbeJob {
net_log_.EndEvent(NetLog::TYPE_IPV6_PROBE_RUNNING,
base::Bind(&IPv6SupportResult::ToNetLogValue,
base::Unretained(&result_)));
- if (!resolver_)
+ if (!resolver_.get())
return;
resolver_->IPv6ProbeSetDefaultAddressFamily(
result_.ipv6_supported ? ADDRESS_FAMILY_UNSPECIFIED
@@ -963,7 +963,7 @@ class HostResolverImpl::LoopbackProbeJob {
explicit LoopbackProbeJob(const base::WeakPtr<HostResolverImpl>& resolver)
: resolver_(resolver),
result_(false) {
- DCHECK(resolver);
+ DCHECK(resolver.get());
const bool kIsSlow = true;
base::WorkerPool::PostTaskAndReply(
FROM_HERE,
@@ -981,7 +981,7 @@ class HostResolverImpl::LoopbackProbeJob {
}
void OnProbeComplete() {
- if (!resolver_)
+ if (!resolver_.get())
return;
resolver_->SetHaveOnlyLoopbackAddresses(result_);
}
@@ -1543,7 +1543,7 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
// Performs Job's last rites. Completes all Requests. Deletes this.
void CompleteRequests(const HostCache::Entry& entry,
base::TimeDelta ttl) {
- CHECK(resolver_);
+ CHECK(resolver_.get());
// This job must be removed from resolver's |jobs_| now to make room for a
// new job with the same key in case one of the OnComplete callbacks decides
@@ -1613,7 +1613,7 @@ class HostResolverImpl::Job : public PrioritizedDispatcher::Job {
// Check if the resolver was destroyed as a result of running the
// callback. If it was, we could continue, but we choose to bail.
- if (!resolver_)
+ if (!resolver_.get())
return;
}
}
@@ -2116,7 +2116,7 @@ void HostResolverImpl::AbortAllInProgressJobs() {
base::WeakPtr<HostResolverImpl> self = weak_ptr_factory_.GetWeakPtr();
// Then Abort them.
- for (size_t i = 0; self && i < jobs_to_abort.size(); ++i) {
+ for (size_t i = 0; self.get() && i < jobs_to_abort.size(); ++i) {
jobs_to_abort[i]->Abort();
jobs_to_abort[i] = NULL;
}
@@ -2132,7 +2132,7 @@ void HostResolverImpl::TryServingAllJobsFromHosts() {
// Life check to bail once |this| is deleted.
base::WeakPtr<HostResolverImpl> self = weak_ptr_factory_.GetWeakPtr();
- for (JobMap::iterator it = jobs_.begin(); self && it != jobs_.end(); ) {
+ for (JobMap::iterator it = jobs_.begin(); self.get() && it != jobs_.end();) {
Job* job = it->second;
++it;
// This could remove |job| from |jobs_|, but iterator will remain valid.
@@ -2193,7 +2193,7 @@ void HostResolverImpl::OnDNSChanged() {
AbortAllInProgressJobs();
// |this| may be deleted inside AbortAllInProgressJobs().
- if (self)
+ if (self.get())
TryServingAllJobsFromHosts();
}
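
The host_resolver_impl.cc hunks above apply the same rule to base::WeakPtr: a weak self pointer captured before running callbacks is re-tested with self.get() rather than an implicit boolean test, because any callback may delete the resolver. Below is a rough, self-contained sketch of that idiom; WeakRef and WeakFactory are simplified stand-ins for base::WeakPtr and base::WeakPtrFactory, not the real classes.

#include <cstdio>
#include <functional>
#include <memory>
#include <vector>

// Simplified stand-ins for base::WeakPtr / base::WeakPtrFactory.
template <typename T>
class WeakRef {
 public:
  explicit WeakRef(std::shared_ptr<T*> slot) : slot_(std::move(slot)) {}
  T* get() const { return slot_ ? *slot_ : nullptr; }
 private:
  std::shared_ptr<T*> slot_;
};

template <typename T>
class WeakFactory {
 public:
  explicit WeakFactory(T* owner) : slot_(std::make_shared<T*>(owner)) {}
  ~WeakFactory() { *slot_ = nullptr; }  // invalidates outstanding WeakRefs
  WeakRef<T> GetWeakRef() const { return WeakRef<T>(slot_); }
 private:
  std::shared_ptr<T*> slot_;
};

class Resolver {
 public:
  Resolver() : weak_factory_(this) {}
  // Mirrors the loop shape in AbortAllInProgressJobs(): a callback may delete
  // |this|, so the weak self pointer is re-checked before every iteration.
  void RunCallbacks(const std::vector<std::function<void()>>& callbacks) {
    WeakRef<Resolver> self = weak_factory_.GetWeakRef();
    for (size_t i = 0; self.get() && i < callbacks.size(); ++i)
      callbacks[i]();
  }
 private:
  WeakFactory<Resolver> weak_factory_;
};

int main() {
  Resolver* resolver = new Resolver;
  std::vector<std::function<void()>> callbacks;
  callbacks.push_back([] { std::puts("first callback runs"); });
  callbacks.push_back([&] { delete resolver; });            // deletes the resolver
  callbacks.push_back([] { std::puts("never reached"); });  // skipped: self.get() is NULL
  resolver->RunCallbacks(callbacks);
  return 0;
}
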
diff --git a/net/http/http_cache_transaction.cc b/net/http/http_cache_transaction.cc
index 3a23682..8105f7c 100644
--- a/net/http/http_cache_transaction.cc
+++ b/net/http/http_cache_transaction.cc
@@ -216,7 +216,7 @@ HttpCache::Transaction::~Transaction() {
cache_io_start_ = base::TimeTicks();
deferred_cache_sensitivity_delay_ = base::TimeDelta();
- if (cache_) {
+ if (cache_.get()) {
if (entry_) {
bool cancel_request = reading_;
if (cancel_request) {
@@ -247,7 +247,7 @@ int HttpCache::Transaction::WriteMetadata(IOBuffer* buf, int buf_len,
DCHECK(buf);
DCHECK_GT(buf_len, 0);
DCHECK(!callback.is_null());
- if (!cache_ || !entry_)
+ if (!cache_.get() || !entry_)
return ERR_UNEXPECTED;
// We don't need to track this operation for anything.
@@ -303,7 +303,7 @@ int HttpCache::Transaction::Start(const HttpRequestInfo* request,
DCHECK(!network_trans_.get());
DCHECK(!entry_);
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
SetRequest(net_log, request);
@@ -327,7 +327,7 @@ int HttpCache::Transaction::RestartIgnoringLastError(
// Ensure that we only have one asynchronous call at a time.
DCHECK(callback_.is_null());
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
int rv = RestartNetworkRequest();
@@ -346,7 +346,7 @@ int HttpCache::Transaction::RestartWithCertificate(
// Ensure that we only have one asynchronous call at a time.
DCHECK(callback_.is_null());
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
int rv = RestartNetworkRequestWithCertificate(client_cert);
@@ -366,7 +366,7 @@ int HttpCache::Transaction::RestartWithAuth(
// Ensure that we only have one asynchronous call at a time.
DCHECK(callback_.is_null());
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
// Clear the intermediate response since we are going to start over.
@@ -394,7 +394,7 @@ int HttpCache::Transaction::Read(IOBuffer* buf, int buf_len,
DCHECK(callback_.is_null());
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
// If we have an intermediate auth response at this point, then it means the
@@ -447,14 +447,14 @@ void HttpCache::Transaction::StopCaching() {
// entry how it is (it will be marked as truncated at destruction), and let
// the next piece of code that executes know that we are now reading directly
// from the net.
- if (cache_ && entry_ && (mode_ & WRITE) && network_trans_.get() &&
+ if (cache_.get() && entry_ && (mode_ & WRITE) && network_trans_.get() &&
!is_sparse_ && !range_requested_) {
mode_ = NONE;
}
}
void HttpCache::Transaction::DoneReading() {
- if (cache_ && entry_) {
+ if (cache_.get() && entry_) {
DCHECK(reading_);
DCHECK_NE(mode_, UPDATE);
if (mode_ & WRITE)
@@ -477,7 +477,7 @@ LoadState HttpCache::Transaction::GetLoadState() const {
if (state != LOAD_STATE_WAITING_FOR_CACHE)
return state;
- if (cache_)
+ if (cache_.get())
return cache_->GetLoadStateForPendingTransaction(this);
return LOAD_STATE_IDLE;
@@ -855,7 +855,7 @@ int HttpCache::Transaction::DoSendRequest() {
int HttpCache::Transaction::DoSendRequestComplete(int result) {
ReportNetworkActionFinish();
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
// If requested, and we have a readable cache entry, and we have
@@ -992,7 +992,7 @@ int HttpCache::Transaction::DoNetworkReadComplete(int result) {
ReportNetworkActionFinish();
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
// If there is an error or we aren't saving the data, we are done; just wait
@@ -1007,7 +1007,7 @@ int HttpCache::Transaction::DoNetworkReadComplete(int result) {
int HttpCache::Transaction::DoInitEntry() {
DCHECK(!new_entry_);
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
if (mode_ == WRITE) {
@@ -1531,7 +1531,7 @@ int HttpCache::Transaction::DoCacheQueryData() {
int HttpCache::Transaction::DoCacheQueryDataComplete(int result) {
DCHECK_EQ(OK, result);
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
return ValidateEntryHeadersAndContinue();
@@ -1563,7 +1563,7 @@ int HttpCache::Transaction::DoCacheReadDataComplete(int result) {
result);
}
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
if (partial_.get()) {
@@ -1607,7 +1607,7 @@ int HttpCache::Transaction::DoCacheWriteDataComplete(int result) {
}
}
// Balance the AddRef from DoCacheWriteData.
- if (!cache_)
+ if (!cache_.get())
return ERR_UNEXPECTED;
if (result != write_len_) {
@@ -2317,7 +2317,7 @@ int HttpCache::Transaction::OnCacheReadError(int result, bool restart) {
}
// Avoid using this entry in the future.
- if (cache_)
+ if (cache_.get())
cache_->DoomActiveEntry(cache_key_);
if (restart) {
@@ -2510,7 +2510,7 @@ void HttpCache::Transaction::UpdateTransactionPattern(
void HttpCache::Transaction::RecordHistograms() {
DCHECK_NE(PATTERN_UNDEFINED, transaction_pattern_);
- if (!cache_ || !cache_->GetCurrentBackend() ||
+ if (!cache_.get() || !cache_->GetCurrentBackend() ||
cache_->GetCurrentBackend()->GetCacheType() != DISK_CACHE ||
cache_->mode() != NORMAL || request_->method != "GET") {
return;
diff --git a/net/http/http_proxy_client_socket_pool.cc b/net/http/http_proxy_client_socket_pool.cc
index cfff600..4b4fabc 100644
--- a/net/http/http_proxy_client_socket_pool.cc
+++ b/net/http/http_proxy_client_socket_pool.cc
@@ -332,7 +332,7 @@ int HttpProxyConnectJob::DoSpdyProxyCreateStreamComplete(int result) {
next_state_ = STATE_HTTP_PROXY_CONNECT_COMPLETE;
base::WeakPtr<SpdyStream> stream = spdy_stream_request_.ReleaseStream();
- DCHECK(stream);
+ DCHECK(stream.get());
// |transport_socket_| will set itself as |stream|'s delegate.
transport_socket_.reset(
new SpdyProxyClientSocket(stream,
diff --git a/net/http/http_transaction_unittest.cc b/net/http/http_transaction_unittest.cc
index bff06e0..40d733b 100644
--- a/net/http/http_transaction_unittest.cc
+++ b/net/http/http_transaction_unittest.cc
@@ -328,7 +328,7 @@ int MockNetworkTransaction::Read(net::IOBuffer* buf, int buf_len,
void MockNetworkTransaction::StopCaching() {}
void MockNetworkTransaction::DoneReading() {
- if (transaction_factory_)
+ if (transaction_factory_.get())
transaction_factory_->TransactionDoneReading();
}
diff --git a/net/socket/socket_test_util.cc b/net/socket/socket_test_util.cc
index a1e4d7c..a14ab2a 100644
--- a/net/socket/socket_test_util.cc
+++ b/net/socket/socket_test_util.cc
@@ -480,7 +480,8 @@ void DeterministicSocketData::Run() {
}
// We're done consuming new data, but it is possible there are still some
// pending callbacks which we expect to complete before returning.
- while (delegate_ && (delegate_->WritePending() || delegate_->ReadPending()) &&
+ while (delegate_.get() &&
+ (delegate_->WritePending() || delegate_->ReadPending()) &&
!stopped()) {
InvokeCallbacks();
base::RunLoop().RunUntilIdle();
@@ -589,13 +590,13 @@ void DeterministicSocketData::Reset() {
}
void DeterministicSocketData::InvokeCallbacks() {
- if (delegate_ && delegate_->WritePending() &&
+ if (delegate_.get() && delegate_->WritePending() &&
(current_write().sequence_number == sequence_number())) {
NextStep();
delegate_->CompleteWrite();
return;
}
- if (delegate_ && delegate_->ReadPending() &&
+ if (delegate_.get() && delegate_->ReadPending() &&
(current_read().sequence_number == sequence_number())) {
NextStep();
delegate_->CompleteRead();
diff --git a/net/spdy/spdy_http_stream.cc b/net/spdy/spdy_http_stream.cc
index 642da2c..5bcd82c 100644
--- a/net/spdy/spdy_http_stream.cc
+++ b/net/spdy/spdy_http_stream.cc
@@ -49,9 +49,9 @@ void SpdyHttpStream::InitializeWithExistingStream(
}
SpdyHttpStream::~SpdyHttpStream() {
- if (stream_) {
+ if (stream_.get()) {
stream_->DetachDelegate();
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
}
}
@@ -71,7 +71,7 @@ int SpdyHttpStream::InitializeStream(const HttpRequestInfo* request_info,
return error;
// |stream_| may be NULL even if OK was returned.
- if (stream_) {
+ if (stream_.get()) {
DCHECK_EQ(stream_->type(), SPDY_PUSH_STREAM);
stream_->SetDelegate(this);
return OK;
@@ -109,7 +109,7 @@ int SpdyHttpStream::ReadResponseHeaders(const CompletionCallback& callback) {
if (stream_closed_)
return closed_stream_status_;
- CHECK(stream_);
+ CHECK(stream_.get());
// Check if we already have the response headers. If so, return synchronously.
if(stream_->response_received()) {
@@ -125,7 +125,7 @@ int SpdyHttpStream::ReadResponseHeaders(const CompletionCallback& callback) {
int SpdyHttpStream::ReadResponseBody(
IOBuffer* buf, int buf_len, const CompletionCallback& callback) {
- if (stream_) {
+ if (stream_.get()) {
CHECK(stream_->is_idle());
CHECK(!stream_->closed());
}
@@ -187,7 +187,7 @@ bool SpdyHttpStream::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
// The reused flag can only be correctly set once a stream has an ID. Streams
// get their IDs once the request has been successfully sent, so this does not
// behave that differently from other stream types.
- if (!spdy_session_.get() || (!stream_ && !stream_closed_))
+ if (!spdy_session_.get() || (!stream_.get() && !stream_closed_))
return false;
SpdyStreamId stream_id =
@@ -282,9 +282,9 @@ int SpdyHttpStream::SendRequest(const HttpRequestHeaders& request_headers,
void SpdyHttpStream::Cancel() {
callback_.Reset();
- if (stream_) {
+ if (stream_.get()) {
stream_->Cancel();
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
}
}
@@ -389,7 +389,7 @@ void SpdyHttpStream::OnDataSent() {
}
void SpdyHttpStream::OnClose(int status) {
- if (stream_) {
+ if (stream_.get()) {
stream_closed_ = true;
closed_stream_status_ = status;
closed_stream_id_ = stream_->stream_id();
@@ -492,7 +492,7 @@ bool SpdyHttpStream::DoBufferedReadCallback() {
// If the transaction is cancelled or errored out, we don't need to complete
// the read.
- if (!stream_ && !stream_closed_)
+ if (!stream_.get() && !stream_closed_)
return false;
int stream_status =
@@ -531,7 +531,7 @@ void SpdyHttpStream::DoCallback(int rv) {
}
void SpdyHttpStream::GetSSLInfo(SSLInfo* ssl_info) {
- DCHECK(stream_);
+ DCHECK(stream_.get());
bool using_npn;
NextProto protocol_negotiated = kProtoUnknown;
stream_->GetSSLInfo(ssl_info, &using_npn, &protocol_negotiated);
@@ -539,7 +539,7 @@ void SpdyHttpStream::GetSSLInfo(SSLInfo* ssl_info) {
void SpdyHttpStream::GetSSLCertRequestInfo(
SSLCertRequestInfo* cert_request_info) {
- DCHECK(stream_);
+ DCHECK(stream_.get());
stream_->GetSSLCertRequestInfo(cert_request_info);
}
diff --git a/net/spdy/spdy_proxy_client_socket.cc b/net/spdy/spdy_proxy_client_socket.cc
index bfbc722..279a30b 100644
--- a/net/spdy/spdy_proxy_client_socket.cc
+++ b/net/spdy/spdy_proxy_client_socket.cc
@@ -137,11 +137,11 @@ void SpdyProxyClientSocket::Disconnect() {
next_state_ = STATE_DISCONNECTED;
- if (spdy_stream_) {
+ if (spdy_stream_.get()) {
// This will cause OnClose to be invoked, which takes care of
// cleaning up all the internal state.
spdy_stream_->Cancel();
- DCHECK(!spdy_stream_);
+ DCHECK(!spdy_stream_.get());
}
}
@@ -167,7 +167,7 @@ void SpdyProxyClientSocket::SetOmniboxSpeculation() {
}
bool SpdyProxyClientSocket::WasEverUsed() const {
- return was_ever_used_ || (spdy_stream_ && spdy_stream_->WasEverUsed());
+ return was_ever_used_ || (spdy_stream_.get() && spdy_stream_->WasEverUsed());
}
bool SpdyProxyClientSocket::UsingTCPFastOpen() const {
@@ -225,7 +225,7 @@ int SpdyProxyClientSocket::Write(IOBuffer* buf, int buf_len,
if (next_state_ != STATE_OPEN)
return ERR_SOCKET_NOT_CONNECTED;
- DCHECK(spdy_stream_);
+ DCHECK(spdy_stream_.get());
spdy_stream_->SendData(buf, buf_len, MORE_DATA_TO_SEND);
net_log_.AddByteTransferEvent(NetLog::TYPE_SOCKET_BYTES_SENT,
buf_len, buf->data());
@@ -408,7 +408,7 @@ int SpdyProxyClientSocket::DoReadReplyComplete(int result) {
// Immediately hand off our SpdyStream to a newly created
// SpdyHttpStream so that any subsequent SpdyFrames are processed in
// the context of the HttpStream, not the socket.
- DCHECK(spdy_stream_);
+ DCHECK(spdy_stream_.get());
base::WeakPtr<SpdyStream> stream = spdy_stream_;
spdy_stream_.reset();
response_stream_.reset(new SpdyHttpStream(NULL, false));
@@ -518,7 +518,7 @@ void SpdyProxyClientSocket::OnClose(int status) {
OnDataReceived(scoped_ptr<SpdyBuffer>());
}
// This may have been deleted by read_callback_, so check first.
- if (weak_ptr && !write_callback.is_null())
+ if (weak_ptr.get() && !write_callback.is_null())
write_callback.Run(ERR_CONNECTION_CLOSED);
}
diff --git a/net/spdy/spdy_session.cc b/net/spdy/spdy_session.cc
index 170dc82..1e71a27 100644
--- a/net/spdy/spdy_session.cc
+++ b/net/spdy/spdy_session.cc
@@ -246,7 +246,7 @@ int SpdyStreamRequest::StartRequest(
const CompletionCallback& callback) {
DCHECK(session.get());
DCHECK(!session_.get());
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
DCHECK(callback_.is_null());
type_ = type;
@@ -274,7 +274,7 @@ void SpdyStreamRequest::CancelRequest() {
base::WeakPtr<SpdyStream> SpdyStreamRequest::ReleaseStream() {
DCHECK(!session_.get());
base::WeakPtr<SpdyStream> stream = stream_;
- DCHECK(stream);
+ DCHECK(stream.get());
Reset();
return stream;
}
@@ -282,18 +282,18 @@ base::WeakPtr<SpdyStream> SpdyStreamRequest::ReleaseStream() {
void SpdyStreamRequest::OnRequestCompleteSuccess(
base::WeakPtr<SpdyStream>* stream) {
DCHECK(session_.get());
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
DCHECK(!callback_.is_null());
CompletionCallback callback = callback_;
Reset();
- DCHECK(*stream);
+ DCHECK(stream->get());
stream_ = *stream;
callback.Run(OK);
}
void SpdyStreamRequest::OnRequestCompleteFailure(int rv) {
DCHECK(session_.get());
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
DCHECK(!callback_.is_null());
CompletionCallback callback = callback_;
Reset();
@@ -923,7 +923,7 @@ void SpdySession::CloseCreatedStream(
DCHECK_EQ(0u, stream->stream_id());
scoped_ptr<SpdyStream> owned_stream(stream.get());
- created_streams_.erase(stream);
+ created_streams_.erase(stream.get());
DeleteStream(owned_stream.Pass(), status);
}
@@ -1100,7 +1100,7 @@ void SpdySession::OnWriteComplete(int result) {
if (in_flight_write_->GetRemainingSize() == 0) {
// It is possible that the stream was cancelled while we were
// writing to the socket.
- if (in_flight_write_stream_) {
+ if (in_flight_write_stream_.get()) {
DCHECK_GT(in_flight_write_frame_size_, 0u);
in_flight_write_stream_->OnFrameWriteComplete(
in_flight_write_frame_type_,
@@ -1162,13 +1162,13 @@ void SpdySession::WriteSocket() {
if (!write_queue_.Dequeue(&frame_type, &producer, &stream))
break;
- if (stream)
+ if (stream.get())
DCHECK(!stream->closed());
// Activate the stream only when sending the SYN_STREAM frame to
// guarantee monotonically-increasing stream IDs.
if (frame_type == SYN_STREAM) {
- if (stream && stream->stream_id() == 0) {
+ if (stream.get() && stream->stream_id() == 0) {
scoped_ptr<SpdyStream> owned_stream =
ActivateCreatedStream(stream.get());
InsertActivatedStream(owned_stream.Pass());
@@ -1441,7 +1441,7 @@ void SpdySession::InsertActivatedStream(scoped_ptr<SpdyStream> stream) {
}
void SpdySession::DeleteStream(scoped_ptr<SpdyStream> stream, int status) {
- if (in_flight_write_stream_ == stream.get()) {
+ if (in_flight_write_stream_.get() == stream.get()) {
// If we're deleting the stream for the in-flight write, we still
// need to let the write complete, so we clear
// |in_flight_write_stream_| and let the write finish on its own
@@ -2259,10 +2259,10 @@ void SpdySession::CompleteStreamRequest(SpdyStreamRequest* pending_request) {
pending_stream_request_completions_.erase(it);
if (rv == OK) {
- DCHECK(stream);
+ DCHECK(stream.get());
pending_request->OnRequestCompleteSuccess(&stream);
} else {
- DCHECK(!stream);
+ DCHECK(!stream.get());
pending_request->OnRequestCompleteFailure(rv);
}
}
diff --git a/net/spdy/spdy_session_spdy3_unittest.cc b/net/spdy/spdy_session_spdy3_unittest.cc
index 5ac87d6..961b0bc 100644
--- a/net/spdy/spdy_session_spdy3_unittest.cc
+++ b/net/spdy/spdy_session_spdy3_unittest.cc
@@ -3032,13 +3032,13 @@ void SpdySessionSpdy3Test::RunResumeAfterUnstallTest31(
EXPECT_TRUE(stream->HasUrl());
EXPECT_EQ(kStreamUrl, stream->GetUrl().spec());
- stall_fn.Run(session.get(), stream);
+ stall_fn.Run(session.get(), stream.get());
data.RunFor(2);
EXPECT_TRUE(stream->send_stalled_by_flow_control());
- unstall_function.Run(session.get(), stream, kBodyDataSize);
+ unstall_function.Run(session.get(), stream.get(), kBodyDataSize);
EXPECT_FALSE(stream->send_stalled_by_flow_control());
@@ -3259,7 +3259,7 @@ class StreamClosingDelegate : public test::StreamDelegateWithBody {
virtual void OnDataSent() OVERRIDE {
test::StreamDelegateWithBody::OnDataSent();
- if (stream_to_close_) {
+ if (stream_to_close_.get()) {
stream_to_close_->Close();
EXPECT_EQ(NULL, stream_to_close_.get());
}
diff --git a/net/spdy/spdy_stream.cc b/net/spdy/spdy_stream.cc
index cc4fd2d..3c57676 100644
--- a/net/spdy/spdy_stream.cc
+++ b/net/spdy/spdy_stream.cc
@@ -59,13 +59,13 @@ class SpdyStream::SynStreamBufferProducer : public SpdyBufferProducer {
public:
SynStreamBufferProducer(const base::WeakPtr<SpdyStream>& stream)
: stream_(stream) {
- DCHECK(stream_);
+ DCHECK(stream_.get());
}
virtual ~SynStreamBufferProducer() {}
virtual scoped_ptr<SpdyBuffer> ProduceBuffer() OVERRIDE {
- if (!stream_) {
+ if (!stream_.get()) {
NOTREACHED();
return scoped_ptr<SpdyBuffer>();
}
diff --git a/net/spdy/spdy_stream_test_util.cc b/net/spdy/spdy_stream_test_util.cc
index 3fd48bb..8024f8f 100644
--- a/net/spdy/spdy_stream_test_util.cc
+++ b/net/spdy/spdy_stream_test_util.cc
@@ -35,9 +35,9 @@ int ClosingDelegate::OnDataReceived(scoped_ptr<SpdyBuffer> buffer) {
void ClosingDelegate::OnDataSent() {}
void ClosingDelegate::OnClose(int status) {
- DCHECK(stream_);
+ DCHECK(stream_.get());
stream_->Close();
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
}
StreamDelegateBase::StreamDelegateBase(
@@ -74,7 +74,7 @@ int StreamDelegateBase::OnDataReceived(scoped_ptr<SpdyBuffer> buffer) {
void StreamDelegateBase::OnDataSent() {}
void StreamDelegateBase::OnClose(int status) {
- if (!stream_)
+ if (!stream_.get())
return;
stream_id_ = stream_->stream_id();
stream_.reset();
diff --git a/net/spdy/spdy_stream_test_util.h b/net/spdy/spdy_stream_test_util.h
index fb2520d..3641536 100644
--- a/net/spdy/spdy_stream_test_util.h
+++ b/net/spdy/spdy_stream_test_util.h
@@ -35,7 +35,7 @@ class ClosingDelegate : public SpdyStream::Delegate {
virtual void OnClose(int status) OVERRIDE;
// Returns whether or not the stream is closed.
- bool StreamIsClosed() const { return !stream_; }
+ bool StreamIsClosed() const { return !stream_.get(); }
private:
base::WeakPtr<SpdyStream> stream_;
@@ -65,7 +65,7 @@ class StreamDelegateBase : public SpdyStream::Delegate {
std::string TakeReceivedData();
// Returns whether or not the stream is closed.
- bool StreamIsClosed() const { return !stream_; }
+ bool StreamIsClosed() const { return !stream_.get(); }
// Returns the stream's ID. If called when the stream is closed,
// returns the stream's ID when it was open.
diff --git a/net/spdy/spdy_websocket_stream.cc b/net/spdy/spdy_websocket_stream.cc
index 0713c9a..3363d51 100644
--- a/net/spdy/spdy_websocket_stream.cc
+++ b/net/spdy/spdy_websocket_stream.cc
@@ -45,14 +45,14 @@ int SpdyWebSocketStream::InitializeStream(const GURL& url,
if (rv == OK) {
stream_ = stream_request_.ReleaseStream();
- DCHECK(stream_);
+ DCHECK(stream_.get());
stream_->SetDelegate(this);
}
return rv;
}
int SpdyWebSocketStream::SendRequest(scoped_ptr<SpdyHeaderBlock> headers) {
- if (!stream_) {
+ if (!stream_.get()) {
NOTREACHED();
return ERR_UNEXPECTED;
}
@@ -63,7 +63,7 @@ int SpdyWebSocketStream::SendRequest(scoped_ptr<SpdyHeaderBlock> headers) {
}
int SpdyWebSocketStream::SendData(const char* data, int length) {
- if (!stream_) {
+ if (!stream_.get()) {
NOTREACHED();
return ERR_UNEXPECTED;
}
@@ -76,9 +76,9 @@ int SpdyWebSocketStream::SendData(const char* data, int length) {
}
void SpdyWebSocketStream::Close() {
- if (stream_) {
+ if (stream_.get()) {
stream_->Close();
- DCHECK(!stream_);
+ DCHECK(!stream_.get());
}
}
@@ -121,7 +121,7 @@ void SpdyWebSocketStream::OnSpdyStreamCreated(int result) {
DCHECK_NE(ERR_IO_PENDING, result);
if (result == OK) {
stream_ = stream_request_.ReleaseStream();
- DCHECK(stream_);
+ DCHECK(stream_.get());
stream_->SetDelegate(this);
}
DCHECK(delegate_);
diff --git a/net/spdy/spdy_websocket_stream_spdy2_unittest.cc b/net/spdy/spdy_websocket_stream_spdy2_unittest.cc
index 7b72321..be91009 100644
--- a/net/spdy/spdy_websocket_stream_spdy2_unittest.cc
+++ b/net/spdy/spdy_websocket_stream_spdy2_unittest.cc
@@ -354,7 +354,7 @@ TEST_F(SpdyWebSocketStreamSpdy2Test, Basic) {
GURL url("ws://example.com/echo");
ASSERT_EQ(OK, websocket_stream_->InitializeStream(url, HIGHEST, net_log));
- ASSERT_TRUE(websocket_stream_->stream_);
+ ASSERT_TRUE(websocket_stream_->stream_.get());
SendRequest();
diff --git a/net/spdy/spdy_websocket_stream_spdy3_unittest.cc b/net/spdy/spdy_websocket_stream_spdy3_unittest.cc
index 6ce2901..d6e3479 100644
--- a/net/spdy/spdy_websocket_stream_spdy3_unittest.cc
+++ b/net/spdy/spdy_websocket_stream_spdy3_unittest.cc
@@ -355,7 +355,7 @@ TEST_F(SpdyWebSocketStreamSpdy3Test, Basic) {
GURL url("ws://example.com/echo");
ASSERT_EQ(OK, websocket_stream_->InitializeStream(url, HIGHEST, net_log));
- ASSERT_TRUE(websocket_stream_->stream_);
+ ASSERT_TRUE(websocket_stream_->stream_.get());
SendRequest();
diff --git a/net/spdy/spdy_write_queue.cc b/net/spdy/spdy_write_queue.cc
index b4e7376..2e17977 100644
--- a/net/spdy/spdy_write_queue.cc
+++ b/net/spdy/spdy_write_queue.cc
@@ -22,7 +22,7 @@ SpdyWriteQueue::PendingWrite::PendingWrite(
: frame_type(frame_type),
frame_producer(frame_producer),
stream(stream),
- has_stream(stream != NULL) {}
+ has_stream(stream.get() != NULL) {}
SpdyWriteQueue::PendingWrite::~PendingWrite() {}
@@ -36,7 +36,7 @@ void SpdyWriteQueue::Enqueue(RequestPriority priority,
SpdyFrameType frame_type,
scoped_ptr<SpdyBufferProducer> frame_producer,
const base::WeakPtr<SpdyStream>& stream) {
- if (stream)
+ if (stream.get())
DCHECK_EQ(stream->priority(), priority);
queue_[priority].push_back(
PendingWrite(frame_type, frame_producer.release(), stream));
@@ -53,7 +53,7 @@ bool SpdyWriteQueue::Dequeue(SpdyFrameType* frame_type,
frame_producer->reset(pending_write.frame_producer);
*stream = pending_write.stream;
if (pending_write.has_stream)
- DCHECK(*stream);
+ DCHECK(stream->get());
return true;
}
}
@@ -62,7 +62,7 @@ bool SpdyWriteQueue::Dequeue(SpdyFrameType* frame_type,
void SpdyWriteQueue::RemovePendingWritesForStream(
const base::WeakPtr<SpdyStream>& stream) {
- DCHECK(stream);
+ DCHECK(stream.get());
if (DCHECK_IS_ON()) {
// |stream| should not have pending writes in a queue not matching
// its priority.
@@ -81,7 +81,7 @@ void SpdyWriteQueue::RemovePendingWritesForStream(
std::deque<PendingWrite>::iterator out_it = queue->begin();
for (std::deque<PendingWrite>::const_iterator it = queue->begin();
it != queue->end(); ++it) {
- if (it->stream == stream) {
+ if (it->stream.get() == stream.get()) {
delete it->frame_producer;
} else {
*out_it = *it;
@@ -99,8 +99,8 @@ void SpdyWriteQueue::RemovePendingWritesForStreamsAfter(
std::deque<PendingWrite>::iterator out_it = queue->begin();
for (std::deque<PendingWrite>::const_iterator it = queue->begin();
it != queue->end(); ++it) {
- if (it->stream && (it->stream->stream_id() > last_good_stream_id ||
- it->stream->stream_id() == 0)) {
+ if (it->stream.get() && (it->stream->stream_id() > last_good_stream_id ||
+ it->stream->stream_id() == 0)) {
delete it->frame_producer;
} else {
*out_it = *it;