author     cbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-07-31 15:24:14 +0000
committer  cbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-07-31 15:24:14 +0000
commit     e0bc50007b8233b5b77b63cc6066b45f89edd1d9 (patch)
tree       f7de668a705f466cff8d6c6bb70594c448e002a2 /net/url_request
parent     446b1856d31c70cac44552dd1536a8e3a90ea850 (diff)
Revert 54448 - Add the actual data being read to the OnBytesRead callback, take two.
I'm not convinced that this change actually introduced any problems (as opposed to the bots being bitten by flakiness), but I'll spend more time investigating on a weekday.
This change was originally committed as
http://src.chromium.org/viewvc/chrome?view=rev&revision=53178
then rolled back as
http://src.chromium.org/viewvc/chrome?view=rev&revision=53416
due to a breakage in the FLAKY_UnknownSize download test.
FLAKY_UnknownSize depended on code with a bug that was exposed by this change. That bug has since been fixed and committed as
http://src.chromium.org/viewvc/chrome?view=rev&revision=53876
So this change is ready for another round of review. It has not changed at all since being committed.
Contributed by: bmcquade@google.com
BUG=48192
TEST=Added new unit tests, ran net_unittests.
Review URL: http://codereview.chromium.org/3010037
TBR=cbentzel@chromium.org
Review URL: http://codereview.chromium.org/3047037
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@54449 0039d316-1c4b-4281-b951-d872f2087c98
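For orientation before reading the patch: the substance of the revert is the shape of the JobObserver::OnBytesRead callback. The sketch below restates the two declarations from the url_request_job_tracker.h hunk further down, side by side. It is illustrative only: the wrapper class names are invented for the comparison, the surrounding JobObserver class is elided, and URLRequestJob is merely forward-declared.

    class URLRequestJob;  // forward declaration; the real type is in url_request_job.h

    // Shape with change 54448 applied (the state this patch reverts away
    // from): observers receive a pointer to the actual bytes read. Per the
    // removed header comment, |buf| is only valid for the duration of the
    // callback.
    class JobObserverWith54448 {
     public:
      virtual void OnBytesRead(URLRequestJob* job, const char* buf,
                               int byte_count) = 0;
    };

    // Shape after this revert: observers receive only the size of each read.
    class JobObserverAfterRevert {
     public:
      virtual void OnBytesRead(URLRequestJob* job, int byte_count) = 0;
    };

Dropping the |buf| parameter is what lets the revert also delete ReadRawDataHelper() and OnRawReadComplete() in url_request_job.cc, since the job no longer needs to hold on to the raw read buffer across asynchronous reads.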
Diffstat (limited to 'net/url_request')
-rw-r--r--  net/url_request/url_request_job.cc                    66
-rw-r--r--  net/url_request/url_request_job.h                     24
-rw-r--r--  net/url_request/url_request_job_tracker.cc              5
-rw-r--r--  net/url_request/url_request_job_tracker.h              14
-rw-r--r--  net/url_request/url_request_job_tracker_unittest.cc   232
5 files changed, 31 insertions, 310 deletions
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 2295047..11fc46d5 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -32,7 +32,7 @@ URLRequestJob::URLRequestJob(URLRequest* request)
       is_compressed_(false),
       done_(false),
       filter_needs_more_output_space_(false),
-      filtered_read_buffer_len_(0),
+      read_buffer_len_(0),
       has_handled_response_(false),
       expected_content_size_(-1),
       deferred_redirect_status_code_(-1),
@@ -197,19 +197,19 @@ bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
   DCHECK_LT(buf_size, 1000000);  // sanity check
   DCHECK(buf);
   DCHECK(bytes_read);
-  DCHECK(filtered_read_buffer_ == NULL);
-  DCHECK_EQ(0, filtered_read_buffer_len_);
 
   *bytes_read = 0;
 
   // Skip Filter if not present
   if (!filter_.get()) {
-    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
+    rv = ReadRawData(buf, buf_size, bytes_read);
+    if (rv && *bytes_read > 0)
+      RecordBytesRead(*bytes_read);
   } else {
     // Save the caller's buffers while we do IO
     // in the filter's buffers.
-    filtered_read_buffer_ = buf;
-    filtered_read_buffer_len_ = buf_size;
+    read_buffer_ = buf;
+    read_buffer_len_ = buf_size;
 
     if (ReadFilteredData(bytes_read)) {
       rv = true;  // we have data to return
@@ -272,7 +272,9 @@ bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
   if (!filter_->stream_data_len() && !is_done()) {
     net::IOBuffer* stream_buffer = filter_->stream_buffer();
     int stream_buffer_size = filter_->stream_buffer_size();
-    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+    rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
+    if (rv && *bytes_read > 0)
+      RecordBytesRead(*bytes_read);
   }
   return rv;
 }
@@ -292,10 +294,9 @@ void URLRequestJob::FilteredDataRead(int bytes_read) {
 
 bool URLRequestJob::ReadFilteredData(int* bytes_read) {
   DCHECK(filter_.get());  // don't add data if there is no filter
-  DCHECK(filtered_read_buffer_ != NULL);  // we need to have a buffer to fill
-  DCHECK_GT(filtered_read_buffer_len_, 0);  // sanity check
-  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // sanity check
-  DCHECK(raw_read_buffer_ == NULL);  // there should be no raw read buffer yet
+  DCHECK(read_buffer_ != NULL);  // we need to have a buffer to fill
+  DCHECK_GT(read_buffer_len_, 0);  // sanity check
+  DCHECK_LT(read_buffer_len_, 1000000);  // sanity check
 
   bool rv = false;
   *bytes_read = 0;
@@ -321,11 +322,10 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
   if ((filter_->stream_data_len() || filter_needs_more_output_space_)
       && !is_done()) {
     // Get filtered data.
-    int filtered_data_len = filtered_read_buffer_len_;
+    int filtered_data_len = read_buffer_len_;
     Filter::FilterStatus status;
     int output_buffer_size = filtered_data_len;
-    status = filter_->ReadData(filtered_read_buffer_->data(),
-                               &filtered_data_len);
+    status = filter_->ReadData(read_buffer_->data(), &filtered_data_len);
 
     if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
       // filter_needs_more_output_space_ was mistaken... there are no more bytes
@@ -390,28 +390,8 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
   if (rv) {
     // When we successfully finished a read, we no longer need to
     // save the caller's buffers.  Release our reference.
-    filtered_read_buffer_ = NULL;
-    filtered_read_buffer_len_ = 0;
-  }
-  return rv;
-}
-
-bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
-                                      int* bytes_read) {
-  DCHECK(!request_->status().is_io_pending());
-  DCHECK(raw_read_buffer_ == NULL);
-
-  // Keep a pointer to the read buffer, so we have access to it in the
-  // OnRawReadComplete() callback in the event that the read completes
-  // asynchronously.
-  raw_read_buffer_ = buf;
-  bool rv = ReadRawData(buf, buf_size, bytes_read);
-
-  if (!request_->status().is_io_pending()) {
-    // If the read completes synchronously, either success or failure,
-    // invoke the OnRawReadComplete callback so we can account for the
-    // completed read.
-    OnRawReadComplete(*bytes_read);
+    read_buffer_ = NULL;
+    read_buffer_len_ = 0;
   }
   return rv;
 }
@@ -546,7 +526,8 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
   // The headers should be complete before reads complete
   DCHECK(has_handled_response_);
 
-  OnRawReadComplete(bytes_read);
+  if (bytes_read > 0)
+    RecordBytesRead(bytes_read);
 
   // Don't notify if we had an error.
   if (!request_->status().is_success())
@@ -659,14 +640,6 @@ bool URLRequestJob::FilterHasData() {
   return filter_.get() && filter_->stream_data_len();
 }
 
-void URLRequestJob::OnRawReadComplete(int bytes_read) {
-  DCHECK(raw_read_buffer_);
-  if (bytes_read > 0) {
-    RecordBytesRead(bytes_read);
-  }
-  raw_read_buffer_ = NULL;
-}
-
 void URLRequestJob::RecordBytesRead(int bytes_read) {
   if (is_profiling()) {
     ++(metrics_->number_of_read_IO_);
@@ -674,8 +647,7 @@
   }
   filter_input_byte_count_ += bytes_read;
   UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
-  g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
-                                        bytes_read);
+  g_url_request_job_tracker.OnBytesRead(this, bytes_read);
 }
 
 const URLRequestStatus URLRequestJob::GetStatus() {
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index ca69940..05f55bc 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -313,23 +313,13 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // an error occurred (or we are waiting for IO to complete).
   bool ReadRawDataForFilter(int *bytes_read);
 
-  // Invokes ReadRawData and records bytes read if the read completes
-  // synchronously.
-  bool ReadRawDataHelper(net::IOBuffer* buf, int buf_size, int* bytes_read);
-
   // Called in response to a redirect that was not canceled to follow the
   // redirect. The current job will be replaced with a new job loading the
   // given redirect destination.
   void FollowRedirect(const GURL& location, int http_status_code);
 
-  // Called after every raw read. If |bytes_read| is > 0, this indicates
-  // a successful read of |bytes_read| unfiltered bytes. If |bytes_read|
-  // is 0, this indicates that there is no additional data to read. If
-  // |bytes_read| is < 0, an error occurred and no bytes were read.
-  void OnRawReadComplete(int bytes_read);
-
-  // Updates the profiling info and notifies observers that an additional
-  // |bytes_read| unfiltered bytes have been read for this job.
+  // Updates the profiling info and notifies observers that bytes_read bytes
+  // have been read.
   void RecordBytesRead(int bytes_read);
 
   // Called to query whether there is data available in the filter to be read
@@ -360,12 +350,8 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // processing the filtered data, we return the data in the caller's buffer.
   // While the async IO is in progress, we save the user buffer here, and
   // when the IO completes, we fill this in.
-  scoped_refptr<net::IOBuffer> filtered_read_buffer_;
-  int filtered_read_buffer_len_;
-
-  // We keep a pointer to the read buffer while asynchronous reads are
-  // in progress, so we are able to pass those bytes to job observers.
-  scoped_refptr<net::IOBuffer> raw_read_buffer_;
+  scoped_refptr<net::IOBuffer> read_buffer_;
+  int read_buffer_len_;
 
   // Used by HandleResponseIfNecessary to track whether we've sent the
   // OnResponseStarted callback and potentially redirect callbacks as well.
@@ -392,7 +378,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // as gathered here is post-SSL, and post-cache-fetch, and does not reflect
   // true packet arrival times in such cases.
 
-  // Total number of bytes read from network (or cache) and typically handed
+  // Total number of bytes read from network (or cache) and and typically handed
   // to filter to process.  Used to histogram compression ratios, and error
   // recovery scenarios in filters.
   int64 filter_input_byte_count_;
diff --git a/net/url_request/url_request_job_tracker.cc b/net/url_request/url_request_job_tracker.cc
index e3e5d36..1f5b33c 100644
--- a/net/url_request/url_request_job_tracker.cc
+++ b/net/url_request/url_request_job_tracker.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -51,8 +51,7 @@ void URLRequestJobTracker::OnJobRedirect(URLRequestJob* job,
 }
 
 void URLRequestJobTracker::OnBytesRead(URLRequestJob* job,
-                                       const char* buf,
                                        int byte_count) {
   FOR_EACH_OBSERVER(JobObserver, observers_,
-                    OnBytesRead(job, buf, byte_count));
+                    OnBytesRead(job, byte_count));
 }
diff --git a/net/url_request/url_request_job_tracker.h b/net/url_request/url_request_job_tracker.h
index 8b554b9..4ec69d7 100644
--- a/net/url_request/url_request_job_tracker.h
+++ b/net/url_request/url_request_job_tracker.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -44,13 +44,9 @@ class URLRequestJobTracker {
     virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
                                int status_code) = 0;
 
-    // Called when a new chunk of unfiltered bytes has been read for
-    // the given job. |byte_count| is the number of bytes for that
-    // read event only. |buf| is a pointer to the data buffer that
-    // contains those bytes. The data in |buf| is only valid for the
-    // duration of the OnBytesRead callback.
-    virtual void OnBytesRead(URLRequestJob* job, const char* buf,
-                             int byte_count) = 0;
+    // Called when a new chunk of bytes has been read for the given job. The
+    // byte count is the number of bytes for that read event only.
+    virtual void OnBytesRead(URLRequestJob* job, int byte_count) = 0;
 
     virtual ~JobObserver() {}
   };
@@ -79,7 +75,7 @@ class URLRequestJobTracker {
                      int status_code);
 
   // Bytes read notifications.
-  void OnBytesRead(URLRequestJob* job, const char* buf, int byte_count);
+  void OnBytesRead(URLRequestJob* job, int byte_count);
 
   // allows iteration over all active jobs
   JobIterator begin() const {
diff --git a/net/url_request/url_request_job_tracker_unittest.cc b/net/url_request/url_request_job_tracker_unittest.cc
deleted file mode 100644
index 3ddbcc2..0000000
--- a/net/url_request/url_request_job_tracker_unittest.cc
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string.h>
-#include <algorithm>
-#include <string>
-#include <vector>
-#include "base/message_loop.h"
-#include "googleurl/src/gurl.h"
-#include "net/base/filter.h"
-#include "net/base/io_buffer.h"
-#include "net/url_request/url_request.h"
-#include "net/url_request/url_request_job.h"
-#include "net/url_request/url_request_job_tracker.h"
-#include "net/url_request/url_request_status.h"
-#include "net/url_request/url_request_unittest.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/platform_test.h"
-
-using testing::Eq;
-using testing::InSequence;
-using testing::NotNull;
-using testing::StrictMock;
-
-namespace {
-
-const char kBasic[] = "Hello\n";
-
-// The above string "Hello\n", gzip compressed.
-const unsigned char kCompressed[] = {
-  0x1f, 0x8b, 0x08, 0x08, 0x38, 0x18, 0x2e, 0x4c, 0x00, 0x03, 0x63,
-  0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x2e, 0x68,
-  0x74, 0x6d, 0x6c, 0x00, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0xe7, 0x02,
-  0x00, 0x16, 0x35, 0x96, 0x31, 0x06, 0x00, 0x00, 0x00
-};
-
-bool GetResponseBody(const GURL& url, std::string* out_body) {
-  if (url.spec() == "test:basic") {
-    *out_body = kBasic;
-  } else if (url.spec() == "test:compressed") {
-    out_body->assign(reinterpret_cast<const char*>(kCompressed),
-                     sizeof(kCompressed));
-  } else {
-    return false;
-  }
-
-  return true;
-}
-
-class MockJobObserver : public URLRequestJobTracker::JobObserver {
- public:
-  MOCK_METHOD1(OnJobAdded, void(URLRequestJob* job));
-  MOCK_METHOD1(OnJobRemoved, void(URLRequestJob* job));
-  MOCK_METHOD2(OnJobDone, void(URLRequestJob* job,
-                               const URLRequestStatus& status));
-  MOCK_METHOD3(OnJobRedirect, void(URLRequestJob* job,
-                                   const GURL& location,
-                                   int status_code));
-  MOCK_METHOD3(OnBytesRead, void(URLRequestJob* job,
-                                 const char* buf,
-                                 int byte_count));
-};
-
-// A URLRequestJob that returns static content for given URLs. We do
-// not use URLRequestTestJob here because URLRequestTestJob fakes
-// async operations by calling ReadRawData synchronously in an async
-// callback. This test requires a URLRequestJob that returns false for
-// async reads, in order to exercise the real async read codepath.
-class URLRequestJobTrackerTestJob : public URLRequestJob {
- public:
-  URLRequestJobTrackerTestJob(URLRequest* request, bool async_reads)
-      : URLRequestJob(request), async_reads_(async_reads) {}
-
-  void Start() {
-    ASSERT_TRUE(GetResponseBody(request_->url(), &response_data_));
-
-    // Start reading asynchronously so that all error reporting and data
-    // callbacks happen as they would for network requests.
-    MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
-        this, &URLRequestJobTrackerTestJob::NotifyHeadersComplete));
-  }
-
-  bool ReadRawData(net::IOBuffer* buf, int buf_size,
-                   int *bytes_read) {
-    const size_t bytes_to_read = std::min(
-        response_data_.size(), static_cast<size_t>(buf_size));
-
-    // Regardless of whether we're performing a sync or async read,
-    // copy the data into the caller's buffer now. That way we don't
-    // have to hold on to the buffers in the async case.
-    memcpy(buf->data(), response_data_.data(), bytes_to_read);
-    response_data_.erase(0, bytes_to_read);
-
-    if (async_reads_) {
-      SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
-      MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
-          this, &URLRequestJobTrackerTestJob::OnReadCompleted,
-          bytes_to_read));
-    } else {
-      SetStatus(URLRequestStatus());
-      *bytes_read = bytes_to_read;
-    }
-    return !async_reads_;
-  }
-
-  void OnReadCompleted(int status) {
-    if (status == 0) {
-      NotifyDone(URLRequestStatus());
-    } else if (status > 0) {
-      SetStatus(URLRequestStatus());
-    } else {
-      ASSERT_FALSE(true) << "Unexpected OnReadCompleted callback.";
-    }
-
-    NotifyReadComplete(status);
-  }
-
-  bool GetContentEncodings(
-      std::vector<Filter::FilterType>* encoding_types) {
-    if (request_->url().spec() == "test:basic") {
-      return false;
-    } else if (request_->url().spec() == "test:compressed") {
-      encoding_types->push_back(Filter::FILTER_TYPE_GZIP);
-      return true;
-    } else {
-      return URLRequestJob::GetContentEncodings(encoding_types);
-    }
-  }
-
-  // The data to send, will be set in Start().
-  std::string response_data_;
-
-  // Should reads be synchronous or asynchronous?
-  const bool async_reads_;
-};
-
-// Google Mock Matcher to check two URLRequestStatus instances for
-// equality.
-MATCHER_P(StatusEq, other, "") {
-  return (arg.status() == other.status() &&
-          arg.os_error() == other.os_error());
-}
-
-// Google Mock Matcher to check that two blocks of memory are equal.
-MATCHER_P2(MemEq, other, len, "") {
-  return memcmp(arg, other, len) == 0;
-}
-
-class URLRequestJobTrackerTest : public PlatformTest {
- protected:
-  static void SetUpTestCase() {
-    URLRequest::RegisterProtocolFactory("test", &Factory);
-  }
-
-  virtual void SetUp() {
-    g_async_reads = true;
-  }
-
-  void AssertJobTrackerCallbacks(const char* url) {
-    InSequence seq;
-    testing::StrictMock<MockJobObserver> observer;
-
-    const GURL gurl(url);
-    std::string body;
-    ASSERT_TRUE(GetResponseBody(gurl, &body));
-
-    // We expect to receive one call for each method on the JobObserver,
-    // in the following order:
-    EXPECT_CALL(observer, OnJobAdded(NotNull()));
-    EXPECT_CALL(observer, OnBytesRead(NotNull(),
-                                      MemEq(body.data(), body.size()),
-                                      Eq(static_cast<int>(body.size()))));
-    EXPECT_CALL(observer, OnJobDone(NotNull(), StatusEq(URLRequestStatus())));
-    EXPECT_CALL(observer, OnJobRemoved(NotNull()));
-
-    // Attach our observer and perform the resource fetch.
-    g_url_request_job_tracker.AddObserver(&observer);
-    Fetch(gurl);
-    g_url_request_job_tracker.RemoveObserver(&observer);
-  }
-
-  void Fetch(const GURL& url) {
-    TestDelegate d;
-    {
-      URLRequest request(url, &d);
-      request.Start();
-      MessageLoop::current()->RunAllPending();
-    }
-
-    // A few sanity checks to make sure that the delegate also
-    // receives the expected callbacks.
-    EXPECT_EQ(1, d.response_started_count());
-    EXPECT_FALSE(d.received_data_before_response());
-    EXPECT_STREQ(kBasic, d.data_received().c_str());
-  }
-
-  static URLRequest::ProtocolFactory Factory;
-  static bool g_async_reads;
-};
-
-// static
-URLRequestJob* URLRequestJobTrackerTest::Factory(URLRequest* request,
-                                                 const std::string& scheme) {
-  return new URLRequestJobTrackerTestJob(request, g_async_reads);
-}
-
-// static
-bool URLRequestJobTrackerTest::g_async_reads = true;
-
-TEST_F(URLRequestJobTrackerTest, BasicAsync) {
-  g_async_reads = true;
-  AssertJobTrackerCallbacks("test:basic");
-}
-
-TEST_F(URLRequestJobTrackerTest, BasicSync) {
-  g_async_reads = false;
-  AssertJobTrackerCallbacks("test:basic");
-}
-
-TEST_F(URLRequestJobTrackerTest, CompressedAsync) {
-  g_async_reads = true;
-  AssertJobTrackerCallbacks("test:compressed");
-}
-
-TEST_F(URLRequestJobTrackerTest, CompressedSync) {
-  g_async_reads = false;
-  AssertJobTrackerCallbacks("test:compressed");
-}
-
-}  // namespace
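For readers who want to see the reverted interface in use: the deleted unit test above registers an observer with g_url_request_job_tracker via AddObserver/RemoveObserver. Below is a minimal sketch of an observer written against the post-revert, two-argument OnBytesRead. The class name and the byte-counting behavior are invented for illustration and are not part of this patch; the method signatures come from url_request_job_tracker.h as it stands after the revert.

    #include "net/url_request/url_request_job_tracker.h"

    // Illustrative observer: tallies bytes as jobs read them. After this
    // revert, only the size of each read is observable; the bytes themselves
    // are no longer passed to observers.
    class ByteCountingObserver : public URLRequestJobTracker::JobObserver {
     public:
      ByteCountingObserver() : total_bytes_(0) {}

      virtual void OnJobAdded(URLRequestJob* job) {}
      virtual void OnJobRemoved(URLRequestJob* job) {}
      virtual void OnJobDone(URLRequestJob* job,
                             const URLRequestStatus& status) {}
      virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
                                 int status_code) {}
      virtual void OnBytesRead(URLRequestJob* job, int byte_count) {
        total_bytes_ += byte_count;
      }

      int64 total_bytes_;  // int64 per the basictypes.h typedefs of this era
    };

    // Usage, mirroring the deleted test's AddObserver/RemoveObserver pattern:
    //   ByteCountingObserver observer;
    //   g_url_request_job_tracker.AddObserver(&observer);
    //   ... issue URLRequests ...
    //   g_url_request_job_tracker.RemoveObserver(&observer);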