author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-01-24 01:54:05 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2009-01-24 01:54:05 +0000
commit     1f8859ad6ec7ea807c0330ddf5559e13be5fb26c (patch)
tree       68887107d0d40f1b22c7a7a07ccd9d7e4caf157a /net
parent     13dc122db24457653d57ff07791043d518eb05e7 (diff)
Change URLRequest to use a ref-counted buffer for actual IO.

The ref-counting will prevent the deletion / reuse of memory while the buffer
is actually being used by pending IO. This seems a very intrusive change, but
at least we will be able to make sure that it works without having to chase
every single destruction of an URLRequest to make sure that any pending IO
was cancelled, and also allows us to avoid blocking on the object destruction.

BUG=5325
Review URL: http://codereview.chromium.org/18390

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@8603 0039d316-1c4b-4281-b951-d872f2087c98
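The idea, reduced to a standalone sketch: as long as the IO layer holds its own reference to the buffer, the party that started the read can go away without invalidating the memory the pending IO will write into. The sketch below uses std::shared_ptr in place of Chromium's scoped_refptr<net::IOBuffer>, and FakeAsyncReader is a made-up stand-in for the lower IO layers; it illustrates only the lifetime argument, not the actual classes touched by this patch.

#include <cstring>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Buffer {
  explicit Buffer(int size) : data(size) {}
  char* ptr() { return data.data(); }
  std::vector<char> data;
};

class FakeAsyncReader {
 public:
  // Starts a "read": the reader keeps its own reference to the buffer for
  // as long as the operation is outstanding.
  void Read(std::shared_ptr<Buffer> buf, std::function<void(int)> done) {
    pending_buf_ = std::move(buf);
    pending_done_ = std::move(done);
  }
  // Simulates the IO completing later with |len| bytes of data.
  void CompleteRead(const char* bytes, int len) {
    std::memcpy(pending_buf_->ptr(), bytes, len);
    pending_done_(len);
    pending_buf_.reset();  // drop the reference once the data is delivered
  }

 private:
  std::shared_ptr<Buffer> pending_buf_;
  std::function<void(int)> pending_done_;
};

int main() {
  FakeAsyncReader reader;
  {
    auto buf = std::make_shared<Buffer>(64);
    reader.Read(buf, [buf](int n) {
      std::cout << "read " << n << " bytes: " << std::string(buf->ptr(), n)
                << "\n";
    });
  }  // The caller's reference is gone here, but the buffer is still alive.
  reader.CompleteRead("hello", 5);  // safe: no use-after-free
  return 0;
}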
Diffstat (limited to 'net')
-rw-r--r--  net/base/bzip2_filter_unittest.cc             7
-rw-r--r--  net/base/completion_callback.h               17
-rw-r--r--  net/base/filter.cc                            8
-rw-r--r--  net/base/filter.h                             7
-rw-r--r--  net/base/gzip_filter_unittest.cc             13
-rw-r--r--  net/base/io_buffer.h                         32
-rw-r--r--  net/base/sdch_filter_unittest.cc              4
-rw-r--r--  net/build/net.vcproj                          4
-rw-r--r--  net/http/http_cache.cc                       18
-rw-r--r--  net/http/http_cache_unittest.cc               4
-rw-r--r--  net/http/http_network_transaction.cc         10
-rw-r--r--  net/http/http_network_transaction.h           4
-rw-r--r--  net/http/http_transaction.h                   5
-rw-r--r--  net/http/http_transaction_unittest.cc         6
-rw-r--r--  net/http/http_transaction_unittest.h         12
-rw-r--r--  net/net_lib.scons                             1
-rw-r--r--  net/proxy/proxy_script_fetcher.cc             7
-rw-r--r--  net/url_request/mime_sniffer_proxy.cc        16
-rw-r--r--  net/url_request/mime_sniffer_proxy.h          9
-rw-r--r--  net/url_request/url_request.cc                2
-rw-r--r--  net/url_request/url_request.h                21
-rw-r--r--  net/url_request/url_request_file_dir_job.cc   8
-rw-r--r--  net/url_request/url_request_file_dir_job.h    4
-rw-r--r--  net/url_request/url_request_file_job.cc       6
-rw-r--r--  net/url_request/url_request_file_job.h        2
-rw-r--r--  net/url_request/url_request_http_job.cc       3
-rw-r--r--  net/url_request/url_request_http_job.h        2
-rw-r--r--  net/url_request/url_request_inet_job.cc       4
-rw-r--r--  net/url_request/url_request_inet_job.h        2
-rw-r--r--  net/url_request/url_request_job.cc           10
-rw-r--r--  net/url_request/url_request_job.h             7
-rw-r--r--  net/url_request/url_request_simple_job.cc     4
-rw-r--r--  net/url_request/url_request_simple_job.h      2
-rw-r--r--  net/url_request/url_request_test_job.cc       5
-rw-r--r--  net/url_request/url_request_test_job.h        4
-rw-r--r--  net/url_request/url_request_unittest.h       15
36 files changed, 187 insertions, 98 deletions
diff --git a/net/base/bzip2_filter_unittest.cc b/net/base/bzip2_filter_unittest.cc
index e246726..ae80305 100644
--- a/net/base/bzip2_filter_unittest.cc
+++ b/net/base/bzip2_filter_unittest.cc
@@ -122,7 +122,7 @@ class BZip2FilterUnitTest : public PlatformTest {
break;
encode_data_len = std::min(encode_avail_size,
filter->stream_buffer_size());
- memcpy(filter->stream_buffer(), encode_next, encode_data_len);
+ memcpy(filter->stream_buffer()->data(), encode_next, encode_data_len);
filter->FlushStreamBuffer(encode_data_len);
encode_next += encode_data_len;
encode_avail_size -= encode_data_len;
@@ -162,7 +162,7 @@ class BZip2FilterUnitTest : public PlatformTest {
int source_len,
char* dest,
int* dest_len) {
- memcpy(filter->stream_buffer(), source, source_len);
+ memcpy(filter->stream_buffer()->data(), source, source_len);
filter->FlushStreamBuffer(source_len);
return filter->ReadData(dest, dest_len);
}
@@ -186,7 +186,8 @@ TEST_F(BZip2FilterUnitTest, DecodeBZip2) {
filter_types.push_back(Filter::FILTER_TYPE_BZIP2);
scoped_ptr<Filter> filter(Filter::Factory(filter_types, kDefaultBufferSize));
ASSERT_TRUE(filter.get());
- memcpy(filter->stream_buffer(), bzip2_encode_buffer_, bzip2_encode_len_);
+ memcpy(filter->stream_buffer()->data(), bzip2_encode_buffer_,
+ bzip2_encode_len_);
filter->FlushStreamBuffer(bzip2_encode_len_);
char bzip2_decode_buffer[kDefaultBufferSize];
diff --git a/net/base/completion_callback.h b/net/base/completion_callback.h
index 4013f71..f9bb233 100644
--- a/net/base/completion_callback.h
+++ b/net/base/completion_callback.h
@@ -6,6 +6,7 @@
#define NET_BASE_COMPLETION_CALLBACK_H__
#include "base/task.h"
+#include "net/base/io_buffer.h"
namespace net {
@@ -41,8 +42,23 @@ class CancelableCompletionCallback :
is_canceled_ = true;
}
+ // Attaches the given buffer to this callback so it is valid until the
+ // operation completes. TODO(rvargas): This is a temporal fix for bug 5325
+ // while I send IOBuffer to the lower layers of code.
+ void UseBuffer(net::IOBuffer* buffer) {
+ DCHECK(!buffer_.get());
+ buffer_ = buffer;
+ }
+
+ // The callback is not expected anymore so release the buffer.
+ void ReleaseBuffer() {
+ DCHECK(buffer_.get());
+ buffer_ = NULL;
+ }
+
virtual void RunWithParams(const Tuple1<int>& params) {
if (is_canceled_) {
+ CancelableCompletionCallback<T>::ReleaseBuffer();
base::RefCounted<CancelableCompletionCallback<T> >::Release();
} else {
CompletionCallbackImpl<T>::RunWithParams(params);
@@ -50,6 +66,7 @@ class CancelableCompletionCallback :
}
private:
+ scoped_refptr<net::IOBuffer> buffer_;
bool is_canceled_;
};
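A rough standalone picture of what UseBuffer()/ReleaseBuffer() buy in the cancellation path above: the callback pins the buffer while a lower layer still holds a raw pointer into it, and lets go only once the completion is either delivered or discarded. std::shared_ptr stands in for scoped_refptr<net::IOBuffer>, and the class below is illustrative, not Chromium's.

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical cancelable completion callback that also pins a buffer.
class CancelableReadCallback {
 public:
  void UseBuffer(std::shared_ptr<std::vector<char>> buf) {
    buf_ = std::move(buf);  // keep the memory valid while IO is pending
  }
  void ReleaseBuffer() { buf_.reset(); }
  void Cancel() { canceled_ = true; }

  // Invoked by the IO layer when the operation finishes.
  void Run(int result) {
    if (canceled_) {
      ReleaseBuffer();  // nobody consumes the data; just unpin the memory
      return;
    }
    std::cout << "read completed, " << result << " bytes\n";
    ReleaseBuffer();
  }

 private:
  std::shared_ptr<std::vector<char>> buf_;
  bool canceled_ = false;
};

int main() {
  auto buf = std::make_shared<std::vector<char>>(256);
  CancelableReadCallback cb;
  cb.UseBuffer(buf);  // pinned for the duration of the (simulated) read
  cb.Cancel();        // e.g. the owning transaction was destroyed
  cb.Run(42);         // late completion: no crash, buffer released cleanly
  return 0;
}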
diff --git a/net/base/filter.cc b/net/base/filter.cc
index 57a13ef..76ee9d9 100644
--- a/net/base/filter.cc
+++ b/net/base/filter.cc
@@ -228,7 +228,7 @@ bool Filter::InitBuffer(int buffer_size) {
if (buffer_size < 0 || stream_buffer())
return false;
- stream_buffer_.reset(new char[buffer_size]);
+ stream_buffer_ = new net::IOBuffer(buffer_size);
if (stream_buffer()) {
stream_buffer_size_ = buffer_size;
@@ -275,9 +275,9 @@ Filter::FilterStatus Filter::ReadData(char* dest_buffer, int* dest_len) {
return next_filter_->ReadData(dest_buffer, dest_len);
if (next_filter_->last_status() == FILTER_NEED_MORE_DATA) {
// Push data into next filter's input.
- char* next_buffer = next_filter_->stream_buffer();
+ net::IOBuffer* next_buffer = next_filter_->stream_buffer();
int next_size = next_filter_->stream_buffer_size();
- last_status_ = ReadFilteredData(next_buffer, &next_size);
+ last_status_ = ReadFilteredData(next_buffer->data(), &next_size);
next_filter_->FlushStreamBuffer(next_size);
switch (last_status_) {
case FILTER_ERROR:
@@ -306,7 +306,7 @@ bool Filter::FlushStreamBuffer(int stream_data_len) {
if (!stream_buffer() || stream_data_len_)
return false;
- next_stream_data_ = stream_buffer();
+ next_stream_data_ = stream_buffer()->data();
stream_data_len_ = stream_data_len;
return true;
}
diff --git a/net/base/filter.h b/net/base/filter.h
index 71dc438..87ef6fd 100644
--- a/net/base/filter.h
+++ b/net/base/filter.h
@@ -35,6 +35,7 @@
#include "base/basictypes.h"
#include "base/scoped_ptr.h"
#include "base/time.h"
+#include "net/base/io_buffer.h"
#include "googleurl/src/gurl.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
@@ -91,8 +92,8 @@ class Filter {
// next_filter_, then it obtains data from this specific filter.
FilterStatus ReadData(char* dest_buffer, int* dest_len);
- // Returns a pointer to the beginning of stream_buffer_.
- char* stream_buffer() const { return stream_buffer_.get(); }
+ // Returns a pointer to the stream_buffer_.
+ net::IOBuffer* stream_buffer() const { return stream_buffer_.get(); }
// Returns the maximum size of stream_buffer_ in number of chars.
int stream_buffer_size() const { return stream_buffer_size_; }
@@ -177,7 +178,7 @@ class Filter {
bool was_cached() const { return was_cached_; }
// Buffer to hold the data to be filtered.
- scoped_array<char> stream_buffer_;
+ scoped_refptr<net::IOBuffer> stream_buffer_;
// Maximum size of stream_buffer_ in number of chars.
int stream_buffer_size_;
diff --git a/net/base/gzip_filter_unittest.cc b/net/base/gzip_filter_unittest.cc
index 1fe4ee4..654ef40 100644
--- a/net/base/gzip_filter_unittest.cc
+++ b/net/base/gzip_filter_unittest.cc
@@ -174,7 +174,7 @@ class GZipUnitTest : public PlatformTest {
int encode_data_len;
encode_data_len = std::min(encode_avail_size,
filter->stream_buffer_size());
- memcpy(filter->stream_buffer(), encode_next, encode_data_len);
+ memcpy(filter->stream_buffer()->data(), encode_next, encode_data_len);
filter->FlushStreamBuffer(encode_data_len);
encode_next += encode_data_len;
encode_avail_size -= encode_data_len;
@@ -208,7 +208,7 @@ class GZipUnitTest : public PlatformTest {
// into the buffer.
int DecodeAllWithFilter(Filter* filter, const char* source, int source_len,
char* dest, int* dest_len) {
- memcpy(filter->stream_buffer(), source, source_len);
+ memcpy(filter->stream_buffer()->data(), source, source_len);
filter->FlushStreamBuffer(source_len);
return filter->ReadData(dest, dest_len);
}
@@ -232,7 +232,8 @@ TEST_F(GZipUnitTest, DecodeDeflate) {
filter_types.push_back(Filter::FILTER_TYPE_DEFLATE);
scoped_ptr<Filter> filter(Filter::Factory(filter_types, kDefaultBufferSize));
ASSERT_TRUE(filter.get());
- memcpy(filter->stream_buffer(), deflate_encode_buffer_, deflate_encode_len_);
+ memcpy(filter->stream_buffer()->data(), deflate_encode_buffer_,
+ deflate_encode_len_);
filter->FlushStreamBuffer(deflate_encode_len_);
char deflate_decode_buffer[kDefaultBufferSize];
@@ -251,7 +252,8 @@ TEST_F(GZipUnitTest, DecodeGZip) {
filter_types.push_back(Filter::FILTER_TYPE_GZIP);
scoped_ptr<Filter> filter(Filter::Factory(filter_types, kDefaultBufferSize));
ASSERT_TRUE(filter.get());
- memcpy(filter->stream_buffer(), gzip_encode_buffer_, gzip_encode_len_);
+ memcpy(filter->stream_buffer()->data(), gzip_encode_buffer_,
+ gzip_encode_len_);
filter->FlushStreamBuffer(gzip_encode_len_);
char gzip_decode_buffer[kDefaultBufferSize];
@@ -275,7 +277,8 @@ TEST_F(GZipUnitTest, DecodeGZipWithMistakenSdch) {
filter_types.push_back(Filter::FILTER_TYPE_GZIP);
scoped_ptr<Filter> filter(Filter::Factory(filter_types, kDefaultBufferSize));
ASSERT_TRUE(filter.get());
- memcpy(filter->stream_buffer(), gzip_encode_buffer_, gzip_encode_len_);
+ memcpy(filter->stream_buffer()->data(), gzip_encode_buffer_,
+ gzip_encode_len_);
filter->FlushStreamBuffer(gzip_encode_len_);
char gzip_decode_buffer[kDefaultBufferSize];
diff --git a/net/base/io_buffer.h b/net/base/io_buffer.h
new file mode 100644
index 0000000..b390103
--- /dev/null
+++ b/net/base/io_buffer.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_BASE_IO_BUFFER_H_
+#define NET_BASE_IO_BUFFER_H_
+
+#include "base/ref_counted.h"
+
+namespace net {
+
+// This is a simple wrapper around a buffer that provides ref counting for
+// easier asynchronous IO handling.
+class IOBuffer : public base::RefCountedThreadSafe<IOBuffer> {
+ public:
+ explicit IOBuffer(int buffer_size) {
+ data_ = new char[buffer_size];
+ }
+ explicit IOBuffer(char* buffer) : data_(buffer) {}
+ virtual ~IOBuffer() {
+ delete[] data_;
+ }
+
+ char* data() { return data_; }
+
+ protected:
+ char* data_;
+};
+
+} // namespace net
+
+#endif // NET_BASE_IO_BUFFER_H_
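For readers unfamiliar with base::RefCountedThreadSafe, the class above boils down to roughly the following: a heap buffer whose lifetime is governed by an atomic reference count, so whichever holder drops the last reference (the URLRequest or a pending IO operation) frees the memory. This is only an approximation; the real behavior comes from base/ref_counted.h.

#include <atomic>

// Rough standalone approximation of net::IOBuffer + base::RefCountedThreadSafe.
class RefCountedIOBuffer {
 public:
  explicit RefCountedIOBuffer(int buffer_size)
      : data_(new char[buffer_size]), ref_count_(0) {}

  char* data() { return data_; }

  void AddRef() { ref_count_.fetch_add(1, std::memory_order_relaxed); }
  void Release() {
    // The holder that drops the count to zero deletes the buffer.
    if (ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1)
      delete this;
  }

 private:
  ~RefCountedIOBuffer() { delete[] data_; }

  char* data_;
  std::atomic<int> ref_count_;
};

int main() {
  RefCountedIOBuffer* buf = new RefCountedIOBuffer(4096);
  buf->AddRef();         // reference held by the URLRequest
  buf->AddRef();         // reference held by the pending IO
  buf->Release();        // the URLRequest is destroyed early...
  buf->data()[0] = 'x';  // ...but the memory is still valid for the IO
  buf->Release();        // IO completes; the buffer is finally freed
  return 0;
}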
diff --git a/net/base/sdch_filter_unittest.cc b/net/base/sdch_filter_unittest.cc
index 21d9aee..b5c31c2 100644
--- a/net/base/sdch_filter_unittest.cc
+++ b/net/base/sdch_filter_unittest.cc
@@ -101,7 +101,7 @@ static bool FilterTestData(const std::string& source,
do {
int copy_amount = std::min(input_amount, source.size() - source_index);
if (copy_amount > 0 && status == Filter::FILTER_NEED_MORE_DATA) {
- memcpy(filter->stream_buffer(), source.data() + source_index,
+ memcpy(filter->stream_buffer()->data(), source.data() + source_index,
copy_amount);
filter->FlushStreamBuffer(copy_amount);
source_index += copy_amount;
@@ -152,7 +152,7 @@ TEST_F(SdchFilterTest, BasicBadDictionary) {
// Dictionary hash is 8 characters followed by a null.
std::string dictionary_hash_prefix("123");
- char* input_buffer = filter->stream_buffer();
+ char* input_buffer = filter->stream_buffer()->data();
int input_buffer_size = filter->stream_buffer_size();
EXPECT_EQ(kInputBufferSize, input_buffer_size);
diff --git a/net/build/net.vcproj b/net/build/net.vcproj
index 7faf5c7..811eb1b 100644
--- a/net/build/net.vcproj
+++ b/net/build/net.vcproj
@@ -309,6 +309,10 @@
>
</File>
<File
+ RelativePath="..\base\io_buffer.h"
+ >
+ </File>
+ <File
RelativePath="..\base\listen_socket.cc"
>
</File>
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 7edc0a8..fe0a130 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -190,7 +190,7 @@ class HttpCache::Transaction : public HttpTransaction {
virtual int RestartWithAuth(const std::wstring& username,
const std::wstring& password,
CompletionCallback* callback);
- virtual int Read(char* buf, int buf_len, CompletionCallback*);
+ virtual int Read(IOBuffer* buf, int buf_len, CompletionCallback*);
virtual const HttpResponseInfo* GetResponseInfo() const;
virtual LoadState GetLoadState() const;
virtual uint64 GetUploadProgress(void) const;
@@ -411,7 +411,7 @@ int HttpCache::Transaction::RestartWithAuth(
return rv;
}
-int HttpCache::Transaction::Read(char* buf, int buf_len,
+int HttpCache::Transaction::Read(IOBuffer* buf, int buf_len,
CompletionCallback* callback) {
DCHECK(buf);
DCHECK(buf_len > 0);
@@ -435,20 +435,23 @@ int HttpCache::Transaction::Read(char* buf, int buf_len,
case WRITE:
DCHECK(network_trans_.get());
rv = network_trans_->Read(buf, buf_len, &network_read_callback_);
- read_buf_ = buf;
+ read_buf_ = buf->data();
if (rv >= 0)
OnNetworkReadCompleted(rv);
break;
case READ:
DCHECK(entry_);
- cache_read_callback_->AddRef(); // Balanced in OnCacheReadCompleted
+ cache_read_callback_->AddRef(); // Balanced in OnCacheReadCompleted.
+ cache_read_callback_->UseBuffer(buf);
rv = entry_->disk_entry->ReadData(kResponseContentIndex, read_offset_,
- buf, buf_len, cache_read_callback_);
- read_buf_ = buf;
+ buf->data(), buf_len,
+ cache_read_callback_);
+ read_buf_ = buf->data();
if (rv >= 0) {
OnCacheReadCompleted(rv);
} else if (rv != ERR_IO_PENDING) {
cache_read_callback_->Release();
+ cache_read_callback_->ReleaseBuffer();
}
break;
default:
@@ -903,7 +906,8 @@ void HttpCache::Transaction::OnNetworkReadCompleted(int result) {
void HttpCache::Transaction::OnCacheReadCompleted(int result) {
DCHECK(cache_);
- cache_read_callback_->Release(); // Balance the AddRef() from Start()
+ cache_read_callback_->Release(); // Balance the AddRef() from Start().
+ cache_read_callback_->ReleaseBuffer();
if (result > 0) {
read_offset_ += result;
diff --git a/net/http/http_cache_unittest.cc b/net/http/http_cache_unittest.cc
index be6986e..0a71854 100644
--- a/net/http/http_cache_unittest.cc
+++ b/net/http/http_cache_unittest.cc
@@ -676,8 +676,8 @@ TEST(HttpCache, SimpleGET_AbandonedCacheRead) {
rv = callback.WaitForResult();
ASSERT_EQ(net::OK, rv);
- char buf[256];
- rv = trans->Read(buf, sizeof(buf), &callback);
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(256);
+ rv = trans->Read(buf, 256, &callback);
EXPECT_EQ(net::ERR_IO_PENDING, rv);
// Test that destroying the transaction while it is reading from the cache
diff --git a/net/http/http_network_transaction.cc b/net/http/http_network_transaction.cc
index 9d6ed1b..2e96deb 100644
--- a/net/http/http_network_transaction.cc
+++ b/net/http/http_network_transaction.cc
@@ -52,7 +52,6 @@ HttpNetworkTransaction::HttpNetworkTransaction(HttpNetworkSession* session,
header_buf_http_offset_(-1),
content_length_(-1), // -1 means unspecified.
content_read_(0),
- read_buf_(NULL),
read_buf_len_(0),
next_state_(STATE_NONE) {
#if defined(OS_WIN)
@@ -134,7 +133,7 @@ void HttpNetworkTransaction::PrepareForAuthRestart(HttpAuth::Target target) {
ResetStateForRestart();
}
-int HttpNetworkTransaction::Read(char* buf, int buf_len,
+int HttpNetworkTransaction::Read(IOBuffer* buf, int buf_len,
CompletionCallback* callback) {
DCHECK(response_.headers);
DCHECK(buf);
@@ -726,7 +725,7 @@ int HttpNetworkTransaction::DoReadBody() {
// We may have some data remaining in the header buffer.
if (header_buf_.get() && header_buf_body_offset_ < header_buf_len_) {
int n = std::min(read_buf_len_, header_buf_len_ - header_buf_body_offset_);
- memcpy(read_buf_, header_buf_.get() + header_buf_body_offset_, n);
+ memcpy(read_buf_->data(), header_buf_.get() + header_buf_body_offset_, n);
header_buf_body_offset_ += n;
if (header_buf_body_offset_ == header_buf_len_) {
header_buf_.reset();
@@ -737,7 +736,8 @@ int HttpNetworkTransaction::DoReadBody() {
return n;
}
- return connection_.socket()->Read(read_buf_, read_buf_len_, &io_callback_);
+ return connection_.socket()->Read(read_buf_->data(), read_buf_len_,
+ &io_callback_);
}
int HttpNetworkTransaction::DoReadBodyComplete(int result) {
@@ -747,7 +747,7 @@ int HttpNetworkTransaction::DoReadBodyComplete(int result) {
// Filter incoming data if appropriate. FilterBuf may return an error.
if (result > 0 && chunked_decoder_.get()) {
- result = chunked_decoder_->FilterBuf(read_buf_, result);
+ result = chunked_decoder_->FilterBuf(read_buf_->data(), result);
if (result == 0 && !chunked_decoder_->reached_eof()) {
// Don't signal completion of the Read call yet or else it'll look like
// we received end-of-file. Wait for more data.
diff --git a/net/http/http_network_transaction.h b/net/http/http_network_transaction.h
index 487ddd3..9a4c619 100644
--- a/net/http/http_network_transaction.h
+++ b/net/http/http_network_transaction.h
@@ -39,7 +39,7 @@ class HttpNetworkTransaction : public HttpTransaction {
virtual int RestartWithAuth(const std::wstring& username,
const std::wstring& password,
CompletionCallback* callback);
- virtual int Read(char* buf, int buf_len, CompletionCallback* callback);
+ virtual int Read(IOBuffer* buf, int buf_len, CompletionCallback* callback);
virtual const HttpResponseInfo* GetResponseInfo() const;
virtual LoadState GetLoadState() const;
virtual uint64 GetUploadProgress() const;
@@ -259,7 +259,7 @@ class HttpNetworkTransaction : public HttpTransaction {
scoped_ptr<HttpChunkedDecoder> chunked_decoder_;
// User buffer and length passed to the Read method.
- char* read_buf_;
+ scoped_refptr<IOBuffer> read_buf_;
int read_buf_len_;
enum State {
diff --git a/net/http/http_transaction.h b/net/http/http_transaction.h
index ba2cf29..46c7671 100644
--- a/net/http/http_transaction.h
+++ b/net/http/http_transaction.h
@@ -66,8 +66,11 @@ class HttpTransaction {
// could not be read.
//
// NOTE: The transaction is not responsible for deleting the callback object.
+ // If the operation is not completed immediately, the transaction must acquire
+ // a reference to the provided buffer.
//
- virtual int Read(char* buf, int buf_len, CompletionCallback* callback) = 0;
+ virtual int Read(IOBuffer* buf, int buf_len,
+ CompletionCallback* callback) = 0;
// Returns the response info for this transaction or NULL if the response
// info is not available.
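A sketch of how an implementation can satisfy the note added above, with std::shared_ptr standing in for scoped_refptr<IOBuffer> and the surrounding class invented for illustration: when the read cannot complete synchronously, the transaction stores its own reference to the caller's buffer and drops it when it invokes the callback.

#include <algorithm>
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

using IOBufferPtr = std::shared_ptr<std::vector<char>>;

// Hypothetical transaction honoring the contract: keep a reference to the
// caller's buffer whenever the read completes asynchronously.
class SketchTransaction {
 public:
  static constexpr int kErrIoPending = -1;

  int Read(IOBufferPtr buf, int buf_len, std::function<void(int)> callback) {
    if (!pending_data_.empty()) {
      // Synchronous path: copy what we already have and return the count.
      int n = std::min<int>(buf_len, static_cast<int>(pending_data_.size()));
      std::copy(pending_data_.begin(), pending_data_.begin() + n, buf->begin());
      return n;
    }
    // Asynchronous path: pin the buffer until the callback runs.
    read_buf_ = std::move(buf);
    callback_ = std::move(callback);
    return kErrIoPending;
  }

  // Called by the underlying connection when data arrives.
  void OnDataArrived(const std::vector<char>& data) {
    int n = std::min<int>(static_cast<int>(read_buf_->size()),
                          static_cast<int>(data.size()));
    std::copy(data.begin(), data.begin() + n, read_buf_->begin());
    read_buf_.reset();  // the caller's buffer is no longer needed
    auto cb = std::move(callback_);
    cb(n);
  }

 private:
  std::vector<char> pending_data_;
  IOBufferPtr read_buf_;
  std::function<void(int)> callback_;
};

int main() {
  SketchTransaction trans;
  auto buf = std::make_shared<std::vector<char>>(16);
  int rv = trans.Read(buf, 16, [buf](int n) {
    std::cout << "async read done: " << n << " bytes\n";
  });
  if (rv == SketchTransaction::kErrIoPending)
    trans.OnDataArrived({'o', 'k'});  // delivers data into the pinned buffer
  return 0;
}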
diff --git a/net/http/http_transaction_unittest.cc b/net/http/http_transaction_unittest.cc
index fdcceff..e22c398 100644
--- a/net/http/http_transaction_unittest.cc
+++ b/net/http/http_transaction_unittest.cc
@@ -137,12 +137,12 @@ int ReadTransaction(net::HttpTransaction* trans, std::string* result) {
std::string content;
do {
- char buf[256];
- rv = trans->Read(buf, sizeof(buf), &callback);
+ scoped_refptr<net::IOBuffer> buf = new net::IOBuffer(256);
+ rv = trans->Read(buf, 256, &callback);
if (rv == net::ERR_IO_PENDING)
rv = callback.WaitForResult();
if (rv > 0) {
- content.append(buf, rv);
+ content.append(buf->data(), rv);
} else if (rv < 0) {
return rv;
}
diff --git a/net/http/http_transaction_unittest.h b/net/http/http_transaction_unittest.h
index ccdb954..0c26178 100644
--- a/net/http/http_transaction_unittest.h
+++ b/net/http/http_transaction_unittest.h
@@ -149,7 +149,7 @@ class TestTransactionConsumer : public CallbackRunner< Tuple1<int> > {
if (result <= 0) {
DidFinish(result);
} else {
- content_.append(read_buf_, result);
+ content_.append(read_buf_->data(), result);
Read();
}
}
@@ -163,7 +163,8 @@ class TestTransactionConsumer : public CallbackRunner< Tuple1<int> > {
void Read() {
state_ = READING;
- int result = trans_->Read(read_buf_, sizeof(read_buf_), this);
+ read_buf_ = new net::IOBuffer(1024);
+ int result = trans_->Read(read_buf_, 1024, this);
if (result != net::ERR_IO_PENDING)
DidRead(result);
}
@@ -177,7 +178,7 @@ class TestTransactionConsumer : public CallbackRunner< Tuple1<int> > {
scoped_ptr<net::HttpTransaction> trans_;
std::string content_;
- char read_buf_[1024];
+ scoped_refptr<net::IOBuffer> read_buf_;
int error_;
static int quit_counter_;
@@ -237,11 +238,12 @@ class MockNetworkTransaction : public net::HttpTransaction {
return net::ERR_FAILED;
}
- virtual int Read(char* buf, int buf_len, net::CompletionCallback* callback) {
+ virtual int Read(net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* callback) {
int data_len = static_cast<int>(data_.size());
int num = std::min(buf_len, data_len - data_cursor_);
if (num) {
- memcpy(buf, data_.data() + data_cursor_, num);
+ memcpy(buf->data(), data_.data() + data_cursor_, num);
data_cursor_ += num;
}
if (test_mode_ & TEST_MODE_SYNC_NET_READ)
diff --git a/net/net_lib.scons b/net/net_lib.scons
index 043db5c..099c46b 100644
--- a/net/net_lib.scons
+++ b/net/net_lib.scons
@@ -62,6 +62,7 @@ input_files = ChromeFileList([
'base/gzip_header.h',
'base/host_resolver.cc',
'base/host_resolver.h',
+ 'base/io_buffer.h',
'base/listen_socket.cc',
'base/listen_socket.h',
'base/load_flags.h',
diff --git a/net/proxy/proxy_script_fetcher.cc b/net/proxy/proxy_script_fetcher.cc
index 4f9f603..b48ee1a 100644
--- a/net/proxy/proxy_script_fetcher.cc
+++ b/net/proxy/proxy_script_fetcher.cc
@@ -6,7 +6,9 @@
#include "base/compiler_specific.h"
#include "base/message_loop.h"
+#include "base/ref_counted.h"
#include "base/string_util.h"
+#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/url_request/url_request.h"
@@ -77,7 +79,7 @@ class ProxyScriptFetcherImpl : public ProxyScriptFetcher,
// Buffer that URLRequest writes into.
enum { kBufSize = 4096 };
- char buf_[kBufSize];
+ scoped_refptr<net::IOBuffer> buf_;
// The next ID to use for |cur_request_| (monotonically increasing).
int next_id_;
@@ -105,6 +107,7 @@ ProxyScriptFetcherImpl::ProxyScriptFetcherImpl(
URLRequestContext* url_request_context)
: ALLOW_THIS_IN_INITIALIZER_LIST(task_factory_(this)),
url_request_context_(url_request_context),
+ buf_(new net::IOBuffer(kBufSize)),
next_id_(0),
cur_request_(NULL),
cur_request_id_(0),
@@ -217,7 +220,7 @@ void ProxyScriptFetcherImpl::OnReadCompleted(URLRequest* request,
request->Cancel();
return;
}
- result_bytes_->append(buf_, num_bytes);
+ result_bytes_->append(buf_->data(), num_bytes);
ReadBody(request);
} else { // Error while reading, or EOF
OnResponseCompleted(request);
diff --git a/net/url_request/mime_sniffer_proxy.cc b/net/url_request/mime_sniffer_proxy.cc
index 24b1fbe..3a8d9a9 100644
--- a/net/url_request/mime_sniffer_proxy.cc
+++ b/net/url_request/mime_sniffer_proxy.cc
@@ -6,10 +6,13 @@
#include "net/base/mime_sniffer.h"
+static const int kBufferSize = 1024;
+
MimeSnifferProxy::MimeSnifferProxy(URLRequest* request,
URLRequest::Delegate* delegate)
: request_(request), delegate_(delegate),
- sniff_content_(false), error_(false) {
+ sniff_content_(false), error_(false),
+ buf_(new net::IOBuffer(kBufferSize)) {
request->set_delegate(this);
}
@@ -20,7 +23,7 @@ void MimeSnifferProxy::OnResponseStarted(URLRequest* request) {
// We need to read content before we know the mime type,
// so we don't call OnResponseStarted.
sniff_content_ = true;
- if (request_->Read(buf_, sizeof(buf_), &bytes_read_) && bytes_read_) {
+ if (request_->Read(buf_, kBufferSize, &bytes_read_) && bytes_read_) {
OnReadCompleted(request, bytes_read_);
} else if (!request_->status().is_io_pending()) {
error_ = true;
@@ -32,7 +35,8 @@ void MimeSnifferProxy::OnResponseStarted(URLRequest* request) {
delegate_->OnResponseStarted(request);
}
-bool MimeSnifferProxy::Read(char* buf, int max_bytes, int *bytes_read) {
+bool MimeSnifferProxy::Read(net::IOBuffer* buf, int max_bytes,
+ int *bytes_read) {
if (sniff_content_) {
// This is the first call to Read() after we've sniffed content.
// Return our local buffer or the error we ran into.
@@ -43,7 +47,7 @@ bool MimeSnifferProxy::Read(char* buf, int max_bytes, int *bytes_read) {
return false;
}
- memcpy(buf, buf_, bytes_read_);
+ memcpy(buf->data(), buf_->data(), bytes_read_);
*bytes_read = bytes_read_;
return true;
}
@@ -57,8 +61,8 @@ void MimeSnifferProxy::OnReadCompleted(URLRequest* request, int bytes_read) {
std::string type_hint;
request_->GetMimeType(&type_hint);
bytes_read_ = bytes_read;
- net::SniffMimeType(
- buf_, bytes_read_, request_->url(), type_hint, &mime_type_);
+ net::SniffMimeType(buf_->data(), bytes_read_, request_->url(),
+ type_hint, &mime_type_);
} else {
error_ = true;
}
diff --git a/net/url_request/mime_sniffer_proxy.h b/net/url_request/mime_sniffer_proxy.h
index 0029a80..898ea60 100644
--- a/net/url_request/mime_sniffer_proxy.h
+++ b/net/url_request/mime_sniffer_proxy.h
@@ -19,6 +19,10 @@
// 2) ms_->mime_type() -- returns the sniffed mime type of the data;
// valid after OnResponseStarted() is called.
+#ifndef NET_URL_REQUEST_MIME_SNIFFER_PROXY_H_
+#define NET_URL_REQUEST_MIME_SNIFFER_PROXY_H_
+
+#include "net/base/io_buffer.h"
#include "net/url_request/url_request.h"
class MimeSnifferProxy : public URLRequest::Delegate {
@@ -48,7 +52,7 @@ class MimeSnifferProxy : public URLRequest::Delegate {
}
// Wrapper around URLRequest::Read.
- bool Read(char* buf, int max_bytes, int *bytes_read);
+ bool Read(net::IOBuffer* buf, int max_bytes, int *bytes_read);
// Return the sniffed mime type of the request. Valid after
// OnResponseStarted() has been called on the delegate.
@@ -69,8 +73,9 @@ class MimeSnifferProxy : public URLRequest::Delegate {
bool error_;
// A buffer for the first bit of the request.
- char buf_[1024];
+ scoped_refptr<net::IOBuffer> buf_;
// The number of bytes we've read into the buffer.
int bytes_read_;
};
+#endif // NET_URL_REQUEST_MIME_SNIFFER_PROXY_H_
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 87facba..19c9810 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -252,7 +252,7 @@ void URLRequest::CancelWithError(int os_error) {
// about being called recursively.
}
-bool URLRequest::Read(char* dest, int dest_size, int *bytes_read) {
+bool URLRequest::Read(net::IOBuffer* dest, int dest_size, int *bytes_read) {
DCHECK(job_);
DCHECK(bytes_read);
DCHECK(!job_->is_done());
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index 5dfc711..f697362 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -21,6 +21,9 @@
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_status.h"
+namespace net {
+class IOBuffer;
+}
class URLRequestJob;
// This stores the values of the Set-Cookie headers received during the request.
@@ -367,16 +370,14 @@ class URLRequest {
// successful status.
// If data is available, Read will return true, and the data and length will
// be returned immediately. If data is not available, Read returns false,
- // and an asynchronous Read is initiated. The caller guarantees the
- // buffer provided will be available until the Read is finished. The
- // Read is finished when the caller receives the OnReadComplete
- // callback. OnReadComplete will be always be called, even if there
- // was a failure.
+ // and an asynchronous Read is initiated. The Read is finished when
+ // the caller receives the OnReadComplete callback. OnReadComplete will be
+ // always be called, even if there was a failure.
//
- // The buf parameter is a buffer to receive the data. Once the read is
- // initiated, the caller guarantees availability of this buffer until
- // the OnReadComplete is received. The buffer must be at least
- // max_bytes in length.
+ // The buf parameter is a buffer to receive the data. If the operation
+ // completes asynchronously, the implementation will reference the buffer
+ // until OnReadComplete is called. The buffer must be at least max_bytes in
+ // length.
//
// The max_bytes parameter is the maximum number of bytes to read.
//
@@ -386,7 +387,7 @@ class URLRequest {
//
// If a read error occurs, Read returns false and the request->status
// will be set to an error.
- bool Read(char* buf, int max_bytes, int *bytes_read);
+ bool Read(net::IOBuffer* buf, int max_bytes, int *bytes_read);
// One of the following two methods should be called in response to an
// OnAuthRequired() callback (and only then).
diff --git a/net/url_request/url_request_file_dir_job.cc b/net/url_request/url_request_file_dir_job.cc
index df24eab..1608684 100644
--- a/net/url_request/url_request_file_dir_job.cc
+++ b/net/url_request/url_request_file_dir_job.cc
@@ -26,7 +26,6 @@ URLRequestFileDirJob::URLRequestFileDirJob(URLRequest* request,
list_complete_(false),
wrote_header_(false),
read_pending_(false),
- read_buffer_(NULL),
read_buffer_length_(0) {
}
@@ -68,7 +67,7 @@ void URLRequestFileDirJob::Kill() {
lister_->Cancel();
}
-bool URLRequestFileDirJob::ReadRawData(char* buf, int buf_size,
+bool URLRequestFileDirJob::ReadRawData(net::IOBuffer* buf, int buf_size,
int *bytes_read) {
DCHECK(bytes_read);
*bytes_read = 0;
@@ -76,7 +75,7 @@ bool URLRequestFileDirJob::ReadRawData(char* buf, int buf_size,
if (is_done())
return true;
- if (FillReadBuffer(buf, buf_size, bytes_read))
+ if (FillReadBuffer(buf->data(), buf_size, bytes_read))
return true;
// We are waiting for more data
@@ -183,7 +182,8 @@ bool URLRequestFileDirJob::FillReadBuffer(char *buf, int buf_size,
void URLRequestFileDirJob::CompleteRead() {
if (read_pending_) {
int bytes_read;
- if (FillReadBuffer(read_buffer_, read_buffer_length_, &bytes_read)) {
+ if (FillReadBuffer(read_buffer_->data(), read_buffer_length_,
+ &bytes_read)) {
// We completed the read, so reset the read buffer.
read_pending_ = false;
read_buffer_ = NULL;
diff --git a/net/url_request/url_request_file_dir_job.h b/net/url_request/url_request_file_dir_job.h
index 882f967..c3881dc 100644
--- a/net/url_request/url_request_file_dir_job.h
+++ b/net/url_request/url_request_file_dir_job.h
@@ -21,7 +21,7 @@ class URLRequestFileDirJob
virtual void Start();
virtual void StartAsync();
virtual void Kill();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
virtual bool GetMimeType(std::string* mime_type);
virtual bool GetCharset(std::string* charset);
virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
@@ -55,7 +55,7 @@ class URLRequestFileDirJob
// we wait for IO to complete. When done, we fill the buffer
// manually.
bool read_pending_;
- char *read_buffer_;
+ scoped_refptr<net::IOBuffer> read_buffer_;
int read_buffer_length_;
DISALLOW_EVIL_CONSTRUCTORS(URLRequestFileDirJob);
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc
index 994a58c..92e7f87 100644
--- a/net/url_request/url_request_file_job.cc
+++ b/net/url_request/url_request_file_job.cc
@@ -128,12 +128,12 @@ void URLRequestFileJob::Kill() {
URLRequestJob::Kill();
}
-bool URLRequestFileJob::ReadRawData(
- char* dest, int dest_size, int *bytes_read) {
+bool URLRequestFileJob::ReadRawData(net::IOBuffer* dest, int dest_size,
+ int *bytes_read) {
DCHECK_NE(dest_size, 0);
DCHECK(bytes_read);
- int rv = stream_.Read(dest, dest_size, &io_callback_);
+ int rv = stream_.Read(dest->data(), dest_size, &io_callback_);
if (rv >= 0) {
// Data is immediately available.
*bytes_read = rv;
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h
index a00e439..0ccaa5a 100644
--- a/net/url_request/url_request_file_job.h
+++ b/net/url_request/url_request_file_job.h
@@ -20,7 +20,7 @@ class URLRequestFileJob : public URLRequestJob {
virtual void Start();
virtual void Kill();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
virtual bool GetMimeType(std::string* mime_type);
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 7728f6c..7a74500 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -348,7 +348,8 @@ bool URLRequestHttpJob::GetMoreData() {
return transaction_.get() && !read_in_progress_;
}
-bool URLRequestHttpJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
+bool URLRequestHttpJob::ReadRawData(net::IOBuffer* buf, int buf_size,
+ int *bytes_read) {
DCHECK_NE(buf_size, 0);
DCHECK(bytes_read);
DCHECK(!read_in_progress_);
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index eda4b4b..e53db48 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -53,7 +53,7 @@ class URLRequestHttpJob : public URLRequestJob {
virtual void CancelAuth();
virtual void ContinueDespiteLastError();
virtual bool GetMoreData();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
// Shadows URLRequestJob's version of this method so we can grab cookies.
void NotifyHeadersComplete();
diff --git a/net/url_request/url_request_inet_job.cc b/net/url_request/url_request_inet_job.cc
index 2d45526..09abfa2 100644
--- a/net/url_request/url_request_inet_job.cc
+++ b/net/url_request/url_request_inet_job.cc
@@ -185,7 +185,7 @@ void URLRequestInetJob::OnIOComplete(const AsyncResult& result) {
}
}
-bool URLRequestInetJob::ReadRawData(char* dest, int dest_size,
+bool URLRequestInetJob::ReadRawData(net::IOBuffer* dest, int dest_size,
int *bytes_read) {
if (is_done())
return 0;
@@ -196,7 +196,7 @@ bool URLRequestInetJob::ReadRawData(char* dest, int dest_size,
*bytes_read = 0;
- int result = CallInternetRead(dest, dest_size, bytes_read);
+ int result = CallInternetRead(dest->data(), dest_size, bytes_read);
if (result == ERROR_SUCCESS) {
DLOG(INFO) << "read " << *bytes_read << " bytes";
if (*bytes_read == 0)
diff --git a/net/url_request/url_request_inet_job.h b/net/url_request/url_request_inet_job.h
index 6341105..bef0c4f 100644
--- a/net/url_request/url_request_inet_job.h
+++ b/net/url_request/url_request_inet_job.h
@@ -29,7 +29,7 @@ class URLRequestInetJob : public URLRequestJob {
}
virtual void Kill();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
// URLRequestJob Authentication methods
virtual void SetAuth(const std::wstring& username,
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 6e6a1df..659156b 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -8,6 +8,7 @@
#include "base/string_util.h"
#include "googleurl/src/gurl.h"
#include "net/base/auth.h"
+#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_job_metrics.h"
@@ -97,7 +98,7 @@ void URLRequestJob::ContinueDespiteLastError() {
// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
-bool URLRequestJob::Read(char* buf, int buf_size, int *bytes_read) {
+bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
bool rv = false;
DCHECK_LT(buf_size, 1000000); // sanity check
@@ -140,7 +141,7 @@ bool URLRequestJob::ReadRawDataForFilter(int *bytes_read) {
// TODO(mbelshe): is it possible that the filter needs *MORE* data
// when there is some data already in the buffer?
if (!filter_->stream_data_len() && !is_done()) {
- char* stream_buffer = filter_->stream_buffer();
+ net::IOBuffer* stream_buffer = filter_->stream_buffer();
int stream_buffer_size = filter_->stream_buffer_size();
rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
if (rv && *bytes_read > 0)
@@ -186,7 +187,7 @@ bool URLRequestJob::ReadFilteredData(int *bytes_read) {
// Get filtered data
int filtered_data_len = read_buffer_len_;
Filter::FilterStatus status;
- status = filter_->ReadData(read_buffer_, &filtered_data_len);
+ status = filter_->ReadData(read_buffer_->data(), &filtered_data_len);
switch (status) {
case Filter::FILTER_DONE: {
*bytes_read = filtered_data_len;
@@ -242,7 +243,8 @@ bool URLRequestJob::ReadFilteredData(int *bytes_read) {
return rv;
}
-bool URLRequestJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
+bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
+ int *bytes_read) {
DCHECK(bytes_read);
*bytes_read = 0;
NotifyDone(URLRequestStatus());
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 43fa866..0a5744b 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -17,6 +17,7 @@
namespace net {
class HttpResponseInfo;
+class IOBuffer;
class UploadData;
}
@@ -78,7 +79,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob> {
// bytes read, 0 when there is no more data, or -1 if there was an error.
// This is just the backend for URLRequest::Read, see that function for more
// info.
- bool Read(char* buf, int buf_size, int *bytes_read);
+ bool Read(net::IOBuffer* buf, int buf_size, int *bytes_read);
// Called to fetch the current load state for the job.
virtual net::LoadState GetLoadState() const { return net::LOAD_STATE_IDLE; }
@@ -231,7 +232,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob> {
// If async IO is pending, the status of the request will be
// URLRequestStatus::IO_PENDING, and buf must remain available until the
// operation is completed. See comments on URLRequest::Read for more info.
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
// Informs the filter that data has been read into its buffer
void FilteredDataRead(int bytes_read);
@@ -289,7 +290,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob> {
// processing the filtered data, we return the data in the caller's buffer.
// While the async IO is in progress, we save the user buffer here, and
// when the IO completes, we fill this in.
- char *read_buffer_;
+ net::IOBuffer *read_buffer_;
int read_buffer_len_;
// Used by HandleResponseIfNecessary to track whether we've sent the
diff --git a/net/url_request/url_request_simple_job.cc b/net/url_request/url_request_simple_job.cc
index ae078b3..a4ef4e1 100644
--- a/net/url_request/url_request_simple_job.cc
+++ b/net/url_request/url_request_simple_job.cc
@@ -29,13 +29,13 @@ bool URLRequestSimpleJob::GetCharset(std::string* charset) {
return true;
}
-bool URLRequestSimpleJob::ReadRawData(char* buf, int buf_size,
+bool URLRequestSimpleJob::ReadRawData(net::IOBuffer* buf, int buf_size,
int* bytes_read) {
DCHECK(bytes_read);
int remaining = static_cast<int>(data_.size()) - data_offset_;
if (buf_size > remaining)
buf_size = remaining;
- memcpy(buf, data_.data() + data_offset_, buf_size);
+ memcpy(buf->data(), data_.data() + data_offset_, buf_size);
data_offset_ += buf_size;
*bytes_read = buf_size;
return true;
diff --git a/net/url_request/url_request_simple_job.h b/net/url_request/url_request_simple_job.h
index 4cb847c..183598a 100644
--- a/net/url_request/url_request_simple_job.h
+++ b/net/url_request/url_request_simple_job.h
@@ -13,7 +13,7 @@ class URLRequestSimpleJob : public URLRequestJob {
URLRequestSimpleJob(URLRequest* request);
virtual void Start();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
virtual bool GetMimeType(std::string* mime_type);
virtual bool GetCharset(std::string* charset);
diff --git a/net/url_request/url_request_test_job.cc b/net/url_request/url_request_test_job.cc
index d544ce4..eda77a7 100644
--- a/net/url_request/url_request_test_job.cc
+++ b/net/url_request/url_request_test_job.cc
@@ -93,7 +93,8 @@ void URLRequestTestJob::StartAsync() {
this->NotifyHeadersComplete();
}
-bool URLRequestTestJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
+bool URLRequestTestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
+ int *bytes_read) {
if (stage_ == WAITING) {
async_buf_ = buf;
async_buf_size_ = buf_size;
@@ -112,7 +113,7 @@ bool URLRequestTestJob::ReadRawData(char* buf, int buf_size, int *bytes_read) {
if (to_read + offset_ > static_cast<int>(data_.length()))
to_read = static_cast<int>(data_.length()) - offset_;
- memcpy(buf, &data_.c_str()[offset_], to_read);
+ memcpy(buf->data(), &data_.c_str()[offset_], to_read);
offset_ += to_read;
*bytes_read = to_read;
diff --git a/net/url_request/url_request_test_job.h b/net/url_request/url_request_test_job.h
index ad69123..4cbf37e 100644
--- a/net/url_request/url_request_test_job.h
+++ b/net/url_request/url_request_test_job.h
@@ -51,7 +51,7 @@ class URLRequestTestJob : public URLRequestJob {
// Job functions
virtual void Start();
- virtual bool ReadRawData(char* buf, int buf_size, int *bytes_read);
+ virtual bool ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read);
virtual void Kill();
virtual bool GetMimeType(std::string* mime_type);
virtual void GetResponseInfo(net::HttpResponseInfo* info);
@@ -78,7 +78,7 @@ class URLRequestTestJob : public URLRequestJob {
int offset_;
// Holds the buffer for an asynchronous ReadRawData call
- char* async_buf_;
+ net::IOBuffer* async_buf_;
int async_buf_size_;
};
diff --git a/net/url_request/url_request_unittest.h b/net/url_request/url_request_unittest.h
index 58a0218..9389ce8 100644
--- a/net/url_request/url_request_unittest.h
+++ b/net/url_request/url_request_unittest.h
@@ -21,6 +21,7 @@
#include "base/thread.h"
#include "base/time.h"
#include "base/waitable_event.h"
+#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/http/http_network_layer.h"
#include "net/url_request/url_request.h"
@@ -62,7 +63,8 @@ class TestDelegate : public URLRequest::Delegate {
received_bytes_count_(0),
received_redirect_count_(0),
received_data_before_response_(false),
- request_failed_(false) {
+ request_failed_(false),
+ buf_(new net::IOBuffer(kBufferSize)) {
}
virtual void OnReceivedRedirect(URLRequest* request, const GURL& new_url) {
@@ -87,7 +89,7 @@ class TestDelegate : public URLRequest::Delegate {
} else {
// Initiate the first read.
int bytes_read = 0;
- if (request->Read(buf_, sizeof(buf_), &bytes_read))
+ if (request->Read(buf_, kBufferSize, &bytes_read))
OnReadCompleted(request, bytes_read);
else if (!request->status().is_io_pending())
OnResponseCompleted(request);
@@ -109,15 +111,15 @@ class TestDelegate : public URLRequest::Delegate {
received_bytes_count_ += bytes_read;
// consume the data
- data_received_.append(buf_, bytes_read);
+ data_received_.append(buf_->data(), bytes_read);
}
// If it was not end of stream, request to read more.
if (request->status().is_success() && bytes_read > 0) {
bytes_read = 0;
- while (request->Read(buf_, sizeof(buf_), &bytes_read)) {
+ while (request->Read(buf_, kBufferSize, &bytes_read)) {
if (bytes_read > 0) {
- data_received_.append(buf_, bytes_read);
+ data_received_.append(buf_->data(), bytes_read);
received_bytes_count_ += bytes_read;
} else {
break;
@@ -173,6 +175,7 @@ class TestDelegate : public URLRequest::Delegate {
bool request_failed() const { return request_failed_; }
private:
+ static const int kBufferSize = 4096;
// options for controlling behavior
bool cancel_in_rr_;
bool cancel_in_rs_;
@@ -192,7 +195,7 @@ class TestDelegate : public URLRequest::Delegate {
std::string data_received_;
// our read buffer
- char buf_[4096];
+ scoped_refptr<net::IOBuffer> buf_;
};
// This object bounds the lifetime of an external python-based HTTP/FTP server