author     cbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-21 13:51:32 +0000
committer  cbentzel@chromium.org <cbentzel@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-21 13:51:32 +0000
commit     7082b2329218da9a77fd6bc9587e86d0ed817196 (patch)
tree       dfc92b8bec2e649dc05e011743bf846bffb8151f /net
parent     598b1ff52fea0f167462f5bd92070756b9a65090 (diff)
Add the actual data being read to the OnBytesRead callback.
This is needed for code at http://code.google.com/p/page-speed/source/browse/bin/trunk/src/pagespeed/pagespeed_input_populator.cc#148
Contributed by: bmcquade@google.com
BUG=48192
TEST=Added new unit tests, ran net_unittests.
Review URL: http://codereview.chromium.org/2849041
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@53178 0039d316-1c4b-4281-b951-d872f2087c98
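
For context, here is a minimal sketch (not part of this change) of how a JobObserver might consume the widened callback. The class name CapturingObserver and its std::string accumulator are illustrative assumptions; the observer interface and the g_url_request_job_tracker registration calls are the ones touched by this patch.

// A sketch of an observer that copies the raw bytes handed to the new
// OnBytesRead overload. The class name and the std::string accumulator
// are hypothetical; the five virtual methods match the JobObserver
// interface as modified below.
class CapturingObserver : public URLRequestJobTracker::JobObserver {
 public:
  virtual void OnJobAdded(URLRequestJob* job) {}
  virtual void OnJobRemoved(URLRequestJob* job) {}
  virtual void OnJobDone(URLRequestJob* job,
                         const URLRequestStatus& status) {}
  virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
                             int status_code) {}
  virtual void OnBytesRead(URLRequestJob* job, const char* buf,
                           int byte_count) {
    // |buf| is only valid for the duration of this callback, so the
    // bytes must be copied out before returning.
    if (byte_count > 0)
      captured_.append(buf, byte_count);
  }

 private:
  std::string captured_;
};

// Usage: call g_url_request_job_tracker.AddObserver(&observer) before
// starting the request, and RemoveObserver(&observer) when done.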
Diffstat (limited to 'net')
-rw-r--r--  net/net.gyp                                           1
-rw-r--r--  net/url_request/url_request_job.cc                   68
-rw-r--r--  net/url_request/url_request_job.h                    26
-rw-r--r--  net/url_request/url_request_job_tracker.cc            5
-rw-r--r--  net/url_request/url_request_job_tracker.h            14
-rw-r--r--  net/url_request/url_request_job_tracker_unittest.cc  232
6 files changed, 313 insertions(+), 33 deletions(-)
diff --git a/net/net.gyp b/net/net.gyp
index c87bdcbd..1dcaf25 100644
--- a/net/net.gyp
+++ b/net/net.gyp
@@ -779,6 +779,7 @@
         'tools/dump_cache/url_to_filename_encoder.cc',
         'tools/dump_cache/url_to_filename_encoder.h',
         'tools/dump_cache/url_to_filename_encoder_unittest.cc',
+        'url_request/url_request_job_tracker_unittest.cc',
         'url_request/url_request_unittest.cc',
         'url_request/url_request_unittest.h',
         'url_request/view_cache_helper_unittest.cc',
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 7ce60c3..23a7d85 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

@@ -32,7 +32,7 @@ URLRequestJob::URLRequestJob(URLRequest* request)
       is_compressed_(false),
       done_(false),
       filter_needs_more_output_space_(false),
-      read_buffer_len_(0),
+      filtered_read_buffer_len_(0),
       has_handled_response_(false),
       expected_content_size_(-1),
       deferred_redirect_status_code_(-1),
@@ -197,19 +197,19 @@ bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
   DCHECK_LT(buf_size, 1000000);  // sanity check
   DCHECK(buf);
   DCHECK(bytes_read);
+  DCHECK(filtered_read_buffer_ == NULL);
+  DCHECK_EQ(0, filtered_read_buffer_len_);

   *bytes_read = 0;

   // Skip Filter if not present
   if (!filter_.get()) {
-    rv = ReadRawData(buf, buf_size, bytes_read);
-    if (rv && *bytes_read > 0)
-      RecordBytesRead(*bytes_read);
+    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
   } else {
     // Save the caller's buffers while we do IO
     // in the filter's buffers.
-    read_buffer_ = buf;
-    read_buffer_len_ = buf_size;
+    filtered_read_buffer_ = buf;
+    filtered_read_buffer_len_ = buf_size;

     if (ReadFilteredData(bytes_read)) {
       rv = true;  // we have data to return
@@ -272,9 +272,7 @@ bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
   if (!filter_->stream_data_len() && !is_done()) {
     net::IOBuffer* stream_buffer = filter_->stream_buffer();
     int stream_buffer_size = filter_->stream_buffer_size();
-    rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
-    if (rv && *bytes_read > 0)
-      RecordBytesRead(*bytes_read);
+    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
   }
   return rv;
 }
@@ -294,9 +292,10 @@ void URLRequestJob::FilteredDataRead(int bytes_read) {

 bool URLRequestJob::ReadFilteredData(int* bytes_read) {
   DCHECK(filter_.get());  // don't add data if there is no filter
-  DCHECK(read_buffer_ != NULL);  // we need to have a buffer to fill
-  DCHECK_GT(read_buffer_len_, 0);  // sanity check
-  DCHECK_LT(read_buffer_len_, 1000000);  // sanity check
+  DCHECK(filtered_read_buffer_ != NULL);  // we need to have a buffer to fill
+  DCHECK_GT(filtered_read_buffer_len_, 0);  // sanity check
+  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // sanity check
+  DCHECK(raw_read_buffer_ == NULL);  // there should be no raw read buffer yet

   bool rv = false;
   *bytes_read = 0;
@@ -322,10 +321,11 @@
   if ((filter_->stream_data_len() || filter_needs_more_output_space_)
       && !is_done()) {
     // Get filtered data.
-    int filtered_data_len = read_buffer_len_;
+    int filtered_data_len = filtered_read_buffer_len_;
     Filter::FilterStatus status;
     int output_buffer_size = filtered_data_len;
-    status = filter_->ReadData(read_buffer_->data(), &filtered_data_len);
+    status = filter_->ReadData(filtered_read_buffer_->data(),
+                               &filtered_data_len);

     if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
       // filter_needs_more_output_space_ was mistaken... there are no more bytes
@@ -390,8 +390,28 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) {
   if (rv) {
     // When we successfully finished a read, we no longer need to
     // save the caller's buffers.  Release our reference.
-    read_buffer_ = NULL;
-    read_buffer_len_ = 0;
+    filtered_read_buffer_ = NULL;
+    filtered_read_buffer_len_ = 0;
+  }
+  return rv;
+}
+
+bool URLRequestJob::ReadRawDataHelper(net::IOBuffer* buf, int buf_size,
+                                      int* bytes_read) {
+  DCHECK(!request_->status().is_io_pending());
+  DCHECK(raw_read_buffer_ == NULL);
+
+  // Keep a pointer to the read buffer, so we have access to it in the
+  // OnRawReadComplete() callback in the event that the read completes
+  // asynchronously.
+  raw_read_buffer_ = buf;
+  bool rv = ReadRawData(buf, buf_size, bytes_read);
+
+  if (!request_->status().is_io_pending()) {
+    // If the read completes synchronously, either success or failure,
+    // invoke the OnRawReadComplete callback so we can account for the
+    // completed read.
+    OnRawReadComplete(*bytes_read);
   }
   return rv;
 }
@@ -526,8 +546,7 @@ void URLRequestJob::NotifyReadComplete(int bytes_read) {
   // The headers should be complete before reads complete
   DCHECK(has_handled_response_);

-  if (bytes_read > 0)
-    RecordBytesRead(bytes_read);
+  OnRawReadComplete(bytes_read);

   // Don't notify if we had an error.
   if (!request_->status().is_success())
@@ -640,6 +659,14 @@ bool URLRequestJob::FilterHasData() {
   return filter_.get() && filter_->stream_data_len();
 }

+void URLRequestJob::OnRawReadComplete(int bytes_read) {
+  DCHECK(raw_read_buffer_);
+  if (bytes_read > 0) {
+    RecordBytesRead(bytes_read);
+  }
+  raw_read_buffer_ = NULL;
+}
+
 void URLRequestJob::RecordBytesRead(int bytes_read) {
   if (is_profiling()) {
     ++(metrics_->number_of_read_IO_);
@@ -647,7 +674,8 @@
   }
   filter_input_byte_count_ += bytes_read;
   UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
-  g_url_request_job_tracker.OnBytesRead(this, bytes_read);
+  g_url_request_job_tracker.OnBytesRead(this, raw_read_buffer_->data(),
+                                        bytes_read);
 }

 const URLRequestStatus URLRequestJob::GetStatus() {
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 617bbf9..da6fc16 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

@@ -311,13 +311,23 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // an error occurred (or we are waiting for IO to complete).
   bool ReadRawDataForFilter(int *bytes_read);

+  // Invokes ReadRawData and records bytes read if the read completes
+  // synchronously.
+  bool ReadRawDataHelper(net::IOBuffer* buf, int buf_size, int* bytes_read);
+
   // Called in response to a redirect that was not canceled to follow the
   // redirect.  The current job will be replaced with a new job loading the
   // given redirect destination.
   void FollowRedirect(const GURL& location, int http_status_code);

-  // Updates the profiling info and notifies observers that bytes_read bytes
-  // have been read.
+  // Called after every raw read.  If |bytes_read| is > 0, this indicates
+  // a successful read of |bytes_read| unfiltered bytes.  If |bytes_read|
+  // is 0, this indicates that there is no additional data to read.  If
+  // |bytes_read| is < 0, an error occurred and no bytes were read.
+  void OnRawReadComplete(int bytes_read);
+
+  // Updates the profiling info and notifies observers that an additional
+  // |bytes_read| unfiltered bytes have been read for this job.
   void RecordBytesRead(int bytes_read);

   // Called to query whether there is data available in the filter to be read
@@ -348,8 +358,12 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // processing the filtered data, we return the data in the caller's buffer.
   // While the async IO is in progress, we save the user buffer here, and
   // when the IO completes, we fill this in.
-  scoped_refptr<net::IOBuffer> read_buffer_;
-  int read_buffer_len_;
+  scoped_refptr<net::IOBuffer> filtered_read_buffer_;
+  int filtered_read_buffer_len_;
+
+  // We keep a pointer to the read buffer while asynchronous reads are
+  // in progress, so we are able to pass those bytes to job observers.
+  scoped_refptr<net::IOBuffer> raw_read_buffer_;

   // Used by HandleResponseIfNecessary to track whether we've sent the
   // OnResponseStarted callback and potentially redirect callbacks as well.
@@ -376,7 +390,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
   // as gathered here is post-SSL, and post-cache-fetch, and does not reflect
   // true packet arrival times in such cases.

-  // Total number of bytes read from network (or cache) and and typically handed
+  // Total number of bytes read from network (or cache) and typically handed
   // to filter to process.  Used to histogram compression ratios, and error
   // recovery scenarios in filters.
   int64 filter_input_byte_count_;
diff --git a/net/url_request/url_request_job_tracker.cc b/net/url_request/url_request_job_tracker.cc
index 1f5b33c..e3e5d36 100644
--- a/net/url_request/url_request_job_tracker.cc
+++ b/net/url_request/url_request_job_tracker.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

@@ -51,7 +51,8 @@ void URLRequestJobTracker::OnJobRedirect(URLRequestJob* job,
 }

 void URLRequestJobTracker::OnBytesRead(URLRequestJob* job,
+                                       const char* buf,
                                        int byte_count) {
   FOR_EACH_OBSERVER(JobObserver, observers_,
-                    OnBytesRead(job, byte_count));
+                    OnBytesRead(job, buf, byte_count));
 }
diff --git a/net/url_request/url_request_job_tracker.h b/net/url_request/url_request_job_tracker.h
index 5f12fc7..e03b71f 100644
--- a/net/url_request/url_request_job_tracker.h
+++ b/net/url_request/url_request_job_tracker.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

@@ -43,9 +43,13 @@ class URLRequestJobTracker {
     virtual void OnJobRedirect(URLRequestJob* job, const GURL& location,
                                int status_code) = 0;

-    // Called when a new chunk of bytes has been read for the given job. The
-    // byte count is the number of bytes for that read event only.
-    virtual void OnBytesRead(URLRequestJob* job, int byte_count) = 0;
+    // Called when a new chunk of unfiltered bytes has been read for
+    // the given job.  |byte_count| is the number of bytes for that
+    // read event only.  |buf| is a pointer to the data buffer that
+    // contains those bytes.  The data in |buf| is only valid for the
+    // duration of the OnBytesRead callback.
+    virtual void OnBytesRead(URLRequestJob* job, const char* buf,
+                             int byte_count) = 0;

     virtual ~JobObserver() {}
   };
@@ -74,7 +78,7 @@ class URLRequestJobTracker {
                      int status_code);

   // Bytes read notifications.
-  void OnBytesRead(URLRequestJob* job, int byte_count);
+  void OnBytesRead(URLRequestJob* job, const char* buf, int byte_count);

   // allows iteration over all active jobs
   JobIterator begin() const {
diff --git a/net/url_request/url_request_job_tracker_unittest.cc b/net/url_request/url_request_job_tracker_unittest.cc
new file mode 100644
index 0000000..3ddbcc2
--- /dev/null
+++ b/net/url_request/url_request_job_tracker_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+#include <algorithm>
+#include <string>
+#include <vector>
+#include "base/message_loop.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/filter.h"
+#include "net/base/io_buffer.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_job_tracker.h"
+#include "net/url_request/url_request_status.h"
+#include "net/url_request/url_request_unittest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+using testing::Eq;
+using testing::InSequence;
+using testing::NotNull;
+using testing::StrictMock;
+
+namespace {
+
+const char kBasic[] = "Hello\n";
+
+// The above string "Hello\n", gzip compressed.
+const unsigned char kCompressed[] = {
+  0x1f, 0x8b, 0x08, 0x08, 0x38, 0x18, 0x2e, 0x4c, 0x00, 0x03, 0x63,
+  0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x2e, 0x68,
+  0x74, 0x6d, 0x6c, 0x00, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0xe7, 0x02,
+  0x00, 0x16, 0x35, 0x96, 0x31, 0x06, 0x00, 0x00, 0x00
+};
+
+bool GetResponseBody(const GURL& url, std::string* out_body) {
+  if (url.spec() == "test:basic") {
+    *out_body = kBasic;
+  } else if (url.spec() == "test:compressed") {
+    out_body->assign(reinterpret_cast<const char*>(kCompressed),
+                     sizeof(kCompressed));
+  } else {
+    return false;
+  }
+
+  return true;
+}
+
+class MockJobObserver : public URLRequestJobTracker::JobObserver {
+ public:
+  MOCK_METHOD1(OnJobAdded, void(URLRequestJob* job));
+  MOCK_METHOD1(OnJobRemoved, void(URLRequestJob* job));
+  MOCK_METHOD2(OnJobDone, void(URLRequestJob* job,
+                               const URLRequestStatus& status));
+  MOCK_METHOD3(OnJobRedirect, void(URLRequestJob* job,
+                                   const GURL& location,
+                                   int status_code));
+  MOCK_METHOD3(OnBytesRead, void(URLRequestJob* job,
+                                 const char* buf,
+                                 int byte_count));
+};
+
+// A URLRequestJob that returns static content for given URLs.  We do
+// not use URLRequestTestJob here because URLRequestTestJob fakes
+// async operations by calling ReadRawData synchronously in an async
+// callback. This test requires a URLRequestJob that returns false for
+// async reads, in order to exercise the real async read codepath.
+class URLRequestJobTrackerTestJob : public URLRequestJob {
+ public:
+  URLRequestJobTrackerTestJob(URLRequest* request, bool async_reads)
+      : URLRequestJob(request), async_reads_(async_reads) {}
+
+  void Start() {
+    ASSERT_TRUE(GetResponseBody(request_->url(), &response_data_));
+
+    // Start reading asynchronously so that all error reporting and data
+    // callbacks happen as they would for network requests.
+    MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+        this, &URLRequestJobTrackerTestJob::NotifyHeadersComplete));
+  }
+
+  bool ReadRawData(net::IOBuffer* buf, int buf_size,
+                   int *bytes_read) {
+    const size_t bytes_to_read = std::min(
+        response_data_.size(), static_cast<size_t>(buf_size));
+
+    // Regardless of whether we're performing a sync or async read,
+    // copy the data into the caller's buffer now. That way we don't
+    // have to hold on to the buffers in the async case.
+    memcpy(buf->data(), response_data_.data(), bytes_to_read);
+    response_data_.erase(0, bytes_to_read);
+
+    if (async_reads_) {
+      SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+      MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
+          this, &URLRequestJobTrackerTestJob::OnReadCompleted,
+          bytes_to_read));
+    } else {
+      SetStatus(URLRequestStatus());
+      *bytes_read = bytes_to_read;
+    }
+    return !async_reads_;
+  }
+
+  void OnReadCompleted(int status) {
+    if (status == 0) {
+      NotifyDone(URLRequestStatus());
+    } else if (status > 0) {
+      SetStatus(URLRequestStatus());
+    } else {
+      ASSERT_FALSE(true) << "Unexpected OnReadCompleted callback.";
+    }
+
+    NotifyReadComplete(status);
+  }
+
+  bool GetContentEncodings(
+      std::vector<Filter::FilterType>* encoding_types) {
+    if (request_->url().spec() == "test:basic") {
+      return false;
+    } else if (request_->url().spec() == "test:compressed") {
+      encoding_types->push_back(Filter::FILTER_TYPE_GZIP);
+      return true;
+    } else {
+      return URLRequestJob::GetContentEncodings(encoding_types);
+    }
+  }
+
+  // The data to send, will be set in Start().
+  std::string response_data_;
+
+  // Should reads be synchronous or asynchronous?
+  const bool async_reads_;
+};
+
+// Google Mock Matcher to check two URLRequestStatus instances for
+// equality.
+MATCHER_P(StatusEq, other, "") {
+  return (arg.status() == other.status() &&
+          arg.os_error() == other.os_error());
+}
+
+// Google Mock Matcher to check that two blocks of memory are equal.
+MATCHER_P2(MemEq, other, len, "") {
+  return memcmp(arg, other, len) == 0;
+}
+
+class URLRequestJobTrackerTest : public PlatformTest {
+ protected:
+  static void SetUpTestCase() {
+    URLRequest::RegisterProtocolFactory("test", &Factory);
+  }
+
+  virtual void SetUp() {
+    g_async_reads = true;
+  }
+
+  void AssertJobTrackerCallbacks(const char* url) {
+    InSequence seq;
+    testing::StrictMock<MockJobObserver> observer;
+
+    const GURL gurl(url);
+    std::string body;
+    ASSERT_TRUE(GetResponseBody(gurl, &body));
+
+    // We expect to receive one call for each method on the JobObserver,
+    // in the following order:
+    EXPECT_CALL(observer, OnJobAdded(NotNull()));
+    EXPECT_CALL(observer, OnBytesRead(NotNull(),
+                                      MemEq(body.data(), body.size()),
+                                      Eq(static_cast<int>(body.size()))));
+    EXPECT_CALL(observer, OnJobDone(NotNull(), StatusEq(URLRequestStatus())));
+    EXPECT_CALL(observer, OnJobRemoved(NotNull()));
+
+    // Attach our observer and perform the resource fetch.
+    g_url_request_job_tracker.AddObserver(&observer);
+    Fetch(gurl);
+    g_url_request_job_tracker.RemoveObserver(&observer);
+  }
+
+  void Fetch(const GURL& url) {
+    TestDelegate d;
+    {
+      URLRequest request(url, &d);
+      request.Start();
+      MessageLoop::current()->RunAllPending();
+    }
+
+    // A few sanity checks to make sure that the delegate also
+    // receives the expected callbacks.
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_STREQ(kBasic, d.data_received().c_str());
+  }
+
+  static URLRequest::ProtocolFactory Factory;
+  static bool g_async_reads;
+};
+
+// static
+URLRequestJob* URLRequestJobTrackerTest::Factory(URLRequest* request,
+                                                 const std::string& scheme) {
+  return new URLRequestJobTrackerTestJob(request, g_async_reads);
+}
+
+// static
+bool URLRequestJobTrackerTest::g_async_reads = true;
+
+TEST_F(URLRequestJobTrackerTest, BasicAsync) {
+  g_async_reads = true;
+  AssertJobTrackerCallbacks("test:basic");
+}
+
+TEST_F(URLRequestJobTrackerTest, BasicSync) {
+  g_async_reads = false;
+  AssertJobTrackerCallbacks("test:basic");
+}
+
+TEST_F(URLRequestJobTrackerTest, CompressedAsync) {
+  g_async_reads = true;
+  AssertJobTrackerCallbacks("test:compressed");
+}
+
+TEST_F(URLRequestJobTrackerTest, CompressedSync) {
+  g_async_reads = false;
+  AssertJobTrackerCallbacks("test:compressed");
+}
+
+}  // namespace