diff options
author | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-03-13 20:29:58 +0000 |
---|---|---|
committer | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-03-13 20:29:58 +0000 |
commit | 17e7b20bdf6906f4bdd574a9e0d4526456f3c301 (patch) | |
tree | a227f6d81a2929759313242e183bcc14ff89fd28 /net/url_request | |
parent | 7e8488b8c4ee4c0d29ba266d9b405d68b9fbdb2e (diff) | |
download | chromium_src-17e7b20bdf6906f4bdd574a9e0d4526456f3c301.zip chromium_src-17e7b20bdf6906f4bdd574a9e0d4526456f3c301.tar.gz chromium_src-17e7b20bdf6906f4bdd574a9e0d4526456f3c301.tar.bz2 |
wtc: please look at URL related code, and hooks and nits
you might have commented on before.
huanr: please look at sdch_filter code. The intent was
no semantic change, and only change in histograms and
stats gathered. I wanted to be sure I had better stats
on several failure cases, as the turn-around time of
adding stats to instrument such cases after they surface
is just too long.
The big feature is the mechanism for getting the total
number of bytes passed to a filter. We use the filter
context to achieve this, and then the SDCH filter can
calculate compression ratio (from pre-gunzip vs post
SDCH decompress).
The number of bytes read was also histogrammed in a number
of error scenarios, to better diagnose what is going on
when these cases arise (example: when some data is still
buffered in the VCDIFF decoder).
The sdch_filter destructor was getting long and hard
to read with multiple if blocks, so I cleaned that up
a bit as well (less indentation, and use of early returns).
Nits from earlier review comments that were not included in the
previous CL are addressed as well.
r=wtc,huanr
Review URL: http://codereview.chromium.org/40319
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@11665 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/url_request')
-rw-r--r-- | net/url_request/url_request_http_job.cc | 5 | ||||
-rw-r--r-- | net/url_request/url_request_job.cc | 8 | ||||
-rw-r--r-- | net/url_request/url_request_job.h | 8 | ||||
-rw-r--r-- | net/url_request/url_request_job_metrics.cc | 5 | ||||
-rw-r--r-- | net/url_request/url_request_job_metrics.h | 2 |
5 files changed, 19 insertions, 9 deletions
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc index 657af1b..cf380de 100644 --- a/net/url_request/url_request_http_job.cc +++ b/net/url_request/url_request_http_job.cc @@ -204,10 +204,7 @@ bool URLRequestHttpJob::GetContentEncodings( } if (!encoding_types->empty()) { - std::string mime_type; - GetMimeType(&mime_type); - // TODO(jar): Need to change this call to use the FilterContext interfaces. - Filter::FixupEncodingTypes(IsSdchResponse(), mime_type, encoding_types); + Filter::FixupEncodingTypes(*this, encoding_types); } return !encoding_types->empty(); } diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc index 2873ae2..4075ede 100644 --- a/net/url_request/url_request_job.cc +++ b/net/url_request/url_request_job.cc @@ -28,7 +28,8 @@ URLRequestJob::URLRequestJob(URLRequest* request) read_buffer_(NULL), read_buffer_len_(0), has_handled_response_(false), - expected_content_size_(-1) { + expected_content_size_(-1), + filter_input_byte_count_(0) { is_profiling_ = request->enable_profiling(); if (is_profiling()) { metrics_.reset(new URLRequestJobMetrics()); @@ -86,6 +87,10 @@ void URLRequestJob::ContinueDespiteLastError() { NOTREACHED(); } +int64 URLRequestJob::GetByteReadCount() const { + return filter_input_byte_count_ ; +} + bool URLRequestJob::GetURL(GURL* gurl) const { if (!request_) return false; @@ -505,6 +510,7 @@ void URLRequestJob::RecordBytesRead(int bytes_read) { ++(metrics_->number_of_read_IO_); metrics_->total_bytes_read_ += bytes_read; } + filter_input_byte_count_ += bytes_read; g_url_request_job_tracker.OnBytesRead(this, bytes_read); } diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h index 121519e..f99ffdf 100644 --- a/net/url_request/url_request_job.h +++ b/net/url_request/url_request_job.h @@ -195,10 +195,11 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>, // FilterContext methods: // These methods are not 
applicable to all connections. virtual bool GetMimeType(std::string* mime_type) const { return false; } + virtual int64 GetByteReadCount() const; virtual bool GetURL(GURL* gurl) const; virtual base::Time GetRequestTime() const; virtual bool IsCachedContent() const; - virtual int GetInputStreambufferSize() const { return kFilterBufSize; } + virtual int GetInputStreamBufferSize() const { return kFilterBufSize; } protected: // Notifies the job that headers have been received. @@ -315,6 +316,11 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>, // Expected content size int64 expected_content_size_; + // Total number of bytes read from network (or cache) and and typically handed + // to filter to process. Used to histogram compression ratios, and error + // recovery scenarios in filters. + int64 filter_input_byte_count_; + DISALLOW_COPY_AND_ASSIGN(URLRequestJob); }; diff --git a/net/url_request/url_request_job_metrics.cc b/net/url_request/url_request_job_metrics.cc index 69a0605..eea21fa 100644 --- a/net/url_request/url_request_job_metrics.cc +++ b/net/url_request/url_request_job_metrics.cc @@ -23,8 +23,9 @@ void URLRequestJobMetrics::AppendText(std::wstring* text) { TimeDelta elapsed = end_time_ - start_time_; StringAppendF(text, - L"; total bytes read = %d; read calls = %d; time = %lld ms;", - total_bytes_read_, number_of_read_IO_, elapsed.InMilliseconds()); + L"; total bytes read = %ld; read calls = %d; time = %lld ms;", + static_cast<long>(total_bytes_read_), + number_of_read_IO_, elapsed.InMilliseconds()); if (success_) { text->append(L" success."); diff --git a/net/url_request/url_request_job_metrics.h b/net/url_request/url_request_job_metrics.h index 42108db..11ee288 100644 --- a/net/url_request/url_request_job_metrics.h +++ b/net/url_request/url_request_job_metrics.h @@ -34,7 +34,7 @@ class URLRequestJobMetrics { base::TimeTicks end_time_; // Total number of bytes the job reads from underline IO. 
- int total_bytes_read_; + int64 total_bytes_read_; // Number of IO read operations the job issues. int number_of_read_IO_; |