author     satish@chromium.org <satish@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-25 07:17:11 +0000
committer  satish@chromium.org <satish@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-25 07:17:11 +0000
commit     699efe606ab0e0995e3613f5fb880babbc66e8fc (patch)
tree       7a06b83d08b940a4c04c96ee994a600399c7a5ed /net
parent     2d6a4beb5df1d9377a141b12dbb2bf6225cb3d5d (diff)
Prototype of chunked transfer encoded POST.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/6134003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72471 0039d316-1c4b-4281-b951-d872f2087c98
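For context, a minimal caller-side sketch of the API this change adds to URLRequest (EnableChunkedUpload, AppendChunkToUpload, MarkEndOfChunks), patterned after the new unit tests. Illustration only: the function name, URL, and delegate wiring are placeholders, and object lifetime management is elided.

#include "googleurl/src/gurl.h"
#include "net/url_request/url_request.h"

// Hypothetical helper: |delegate| is a URLRequest::Delegate owned by the
// caller; the request is assumed to be cleaned up when it completes.
void StartChunkedPost(URLRequest::Delegate* delegate) {
  URLRequest* request = new URLRequest(GURL("http://127.0.0.1:1337/echo"),
                                       delegate);
  request->EnableChunkedUpload();   // must be called before Start()
  request->set_method("POST");
  request->Start();
  // Chunks may be appended before or after Start(); each one is sent
  // immediately using chunked transfer encoding.
  request->AppendChunkToUpload("hello, ", 7);
  request->AppendChunkToUpload("world", 5);
  request->MarkEndOfChunks();       // sends the terminating zero-length chunk
}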
Diffstat (limited to 'net')
-rw-r--r--  net/base/upload_data.cc                  38
-rw-r--r--  net/base/upload_data.h                   39
-rw-r--r--  net/base/upload_data_stream.cc           38
-rw-r--r--  net/base/upload_data_stream.h            13
-rw-r--r--  net/base/upload_data_stream_unittest.cc   4
-rw-r--r--  net/http/http_request_headers.cc          1
-rw-r--r--  net/http/http_request_headers.h           1
-rw-r--r--  net/http/http_stream_parser.cc           44
-rw-r--r--  net/http/http_stream_parser.h             5
-rw-r--r--  net/http/http_util.cc                    11
-rw-r--r--  net/spdy/spdy_http_stream.cc              2
-rwxr-xr-x  net/tools/testserver/testserver.py       38
-rw-r--r--  net/url_request/url_request.cc           21
-rw-r--r--  net/url_request/url_request.h            14
-rw-r--r--  net/url_request/url_request_unittest.cc  62
15 files changed, 280 insertions, 51 deletions
diff --git a/net/base/upload_data.cc b/net/base/upload_data.cc
index 664285d..f148cac 100644
--- a/net/base/upload_data.cc
+++ b/net/base/upload_data.cc
@@ -6,6 +6,7 @@
#include "base/file_util.h"
#include "base/logging.h"
+#include "base/string_util.h"
#include "net/base/file_stream.h"
#include "net/base/net_errors.h"
@@ -15,6 +16,7 @@ UploadData::Element::Element()
: type_(TYPE_BYTES),
file_range_offset_(0),
file_range_length_(kuint64max),
+ is_last_chunk_(false),
override_content_length_(false),
content_length_computed_(false),
content_length_(-1),
@@ -30,7 +32,7 @@ uint64 UploadData::Element::GetContentLength() {
if (override_content_length_ || content_length_computed_)
return content_length_;
- if (type_ == TYPE_BYTES)
+ if (type_ == TYPE_BYTES || type_ == TYPE_CHUNK)
return static_cast<uint64>(bytes_.size());
else if (type_ == TYPE_BLOB)
// The blob reference will be resolved later.
@@ -65,6 +67,18 @@ uint64 UploadData::Element::GetContentLength() {
return content_length_;
}
+void UploadData::Element::SetToChunk(const char* bytes, int bytes_len) {
+ std::string chunk_length = StringPrintf("%X\r\n", bytes_len);
+ bytes_.clear();
+ bytes_.insert(bytes_.end(), chunk_length.data(),
+ chunk_length.data() + chunk_length.length());
+ bytes_.insert(bytes_.end(), bytes, bytes + bytes_len);
+ const char* crlf = "\r\n";
+ bytes_.insert(bytes_.end(), crlf, crlf + 2);
+ type_ = TYPE_CHUNK;
+ is_last_chunk_ = (bytes_len == 0);
+}
+
FileStream* UploadData::Element::NewFileStreamForReading() {
// In common usage GetContentLength() will call this first and store the
// result into |file_| and a subsequent call (from UploadDataStream) will
@@ -97,10 +111,14 @@ FileStream* UploadData::Element::NewFileStreamForReading() {
return file.release();
}
-UploadData::UploadData() : identifier_(0) {
+UploadData::UploadData()
+ : identifier_(0),
+ chunk_callback_(NULL),
+ is_chunked_(false) {
}
void UploadData::AppendBytes(const char* bytes, int bytes_len) {
+ DCHECK(!is_chunked_);
if (bytes_len > 0) {
elements_.push_back(Element());
elements_.back().SetToBytes(bytes, bytes_len);
@@ -108,6 +126,7 @@ void UploadData::AppendBytes(const char* bytes, int bytes_len) {
}
void UploadData::AppendFile(const FilePath& file_path) {
+ DCHECK(!is_chunked_);
elements_.push_back(Element());
elements_.back().SetToFilePath(file_path);
}
@@ -115,17 +134,32 @@ void UploadData::AppendFile(const FilePath& file_path) {
void UploadData::AppendFileRange(const FilePath& file_path,
uint64 offset, uint64 length,
const base::Time& expected_modification_time) {
+ DCHECK(!is_chunked_);
elements_.push_back(Element());
elements_.back().SetToFilePathRange(file_path, offset, length,
expected_modification_time);
}
void UploadData::AppendBlob(const GURL& blob_url) {
+ DCHECK(!is_chunked_);
elements_.push_back(Element());
elements_.back().SetToBlobUrl(blob_url);
}
+void UploadData::AppendChunk(const char* bytes, int bytes_len) {
+ DCHECK(is_chunked_);
+ elements_.push_back(Element());
+ elements_.back().SetToChunk(bytes, bytes_len);
+ if (chunk_callback_)
+ chunk_callback_->OnChunkAvailable();
+}
+
+void UploadData::set_chunk_callback(ChunkCallback* callback) {
+ chunk_callback_ = callback;
+}
+
uint64 UploadData::GetContentLength() {
+ DCHECK(!is_chunked_);
uint64 len = 0;
std::vector<Element>::iterator it = elements_.begin();
for (; it != elements_.end(); ++it)
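The framing built by Element::SetToChunk() above is the standard HTTP/1.1 chunk format: hexadecimal size, CRLF, payload, CRLF, with a zero-size chunk marking the end. A standalone sketch of the same framing (FrameChunk is hypothetical, not part of the change):

#include <string>
#include "base/string_util.h"  // StringPrintf, as used by SetToChunk()

// FrameChunk("abc", 3) returns "3\r\nabc\r\n";
// FrameChunk(NULL, 0) returns "0\r\n\r\n", which marks the last chunk.
std::string FrameChunk(const char* bytes, int bytes_len) {
  std::string framed = StringPrintf("%X\r\n", bytes_len);
  if (bytes_len > 0)
    framed.append(bytes, bytes_len);
  framed.append("\r\n");
  return framed;
}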
diff --git a/net/base/upload_data.h b/net/base/upload_data.h
index 6f72162..68ca26b 100644
--- a/net/base/upload_data.h
+++ b/net/base/upload_data.h
@@ -19,12 +19,27 @@ namespace net {
class FileStream;
+// Interface implemented by callers who require callbacks when new chunks
+// of data are added.
+class ChunkCallback {
+ public:
+ // Invoked when a new data chunk was given for a chunked transfer upload.
+ virtual void OnChunkAvailable() = 0;
+
+ protected:
+ virtual ~ChunkCallback() {}
+};
+
class UploadData : public base::RefCounted<UploadData> {
public:
enum Type {
TYPE_BYTES,
TYPE_FILE,
- TYPE_BLOB
+ TYPE_BLOB,
+
+ // A block of bytes to be sent in chunked encoding immediately, without
+ // waiting for rest of the data.
+ TYPE_CHUNK,
};
class Element {
@@ -72,6 +87,13 @@ class UploadData : public base::RefCounted<UploadData> {
blob_url_ = blob_url;
}
+ // Though similar to bytes, a chunk indicates that the element is sent via
+ // chunked transfer encoding and not buffered until the full upload data
+ // is available.
+ void SetToChunk(const char* bytes, int bytes_len);
+
+ bool is_last_chunk() const { return is_last_chunk_; }
+
// Returns the byte-length of the element. For files that do not exist, 0
// is returned. This is done for consistency with Mozilla.
// Once called, this function will always return the same value.
@@ -97,6 +119,7 @@ class UploadData : public base::RefCounted<UploadData> {
uint64 file_range_length_;
base::Time expected_file_modification_time_;
GURL blob_url_;
+ bool is_last_chunk_;
bool override_content_length_;
bool content_length_computed_;
uint64 content_length_;
@@ -119,6 +142,18 @@ class UploadData : public base::RefCounted<UploadData> {
void AppendBlob(const GURL& blob_url);
+ // Adds the given chunk of bytes to be sent immediately with chunked transfer
+ // encoding. Set bytes_len to zero for the last chunk.
+ void AppendChunk(const char* bytes, int bytes_len);
+
+ // Sets the callback to be invoked when a new chunk is available to upload.
+ void set_chunk_callback(ChunkCallback* callback);
+
+ // Initializes the object to send chunks of upload data over time rather
+ // than all at once.
+ void set_is_chunked(bool set) { is_chunked_ = set; }
+ bool is_chunked() const { return is_chunked_; }
+
// Returns the total size in bytes of the data to upload.
uint64 GetContentLength();
@@ -145,6 +180,8 @@ class UploadData : public base::RefCounted<UploadData> {
std::vector<Element> elements_;
int64 identifier_;
+ ChunkCallback* chunk_callback_;
+ bool is_chunked_;
DISALLOW_COPY_AND_ASSIGN(UploadData);
};
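For reference, a hypothetical consumer of the new ChunkCallback interface (ChunkLogger is illustrative only; in this change the sole implementor is HttpStreamParser, further down):

#include "net/base/upload_data.h"

// Illustrative consumer notified whenever UploadData::AppendChunk() runs.
class ChunkLogger : public net::ChunkCallback {
 public:
  explicit ChunkLogger(net::UploadData* upload) : upload_(upload) {
    upload_->set_is_chunked(true);
    upload_->set_chunk_callback(this);
  }
  virtual ~ChunkLogger() {
    upload_->set_chunk_callback(NULL);
  }
  // net::ChunkCallback:
  virtual void OnChunkAvailable() {
    // A new TYPE_CHUNK element is queued; a real consumer would resume
    // writing the upload stream here (cf. HttpStreamParser::OnChunkAvailable).
  }
 private:
  scoped_refptr<net::UploadData> upload_;
};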
diff --git a/net/base/upload_data_stream.cc b/net/base/upload_data_stream.cc
index 0ec4c97..1f77f06 100644
--- a/net/base/upload_data_stream.cc
+++ b/net/base/upload_data_stream.cc
@@ -12,9 +12,6 @@
namespace net {
-UploadDataStream::~UploadDataStream() {
-}
-
UploadDataStream* UploadDataStream::Create(UploadData* data, int* error_code) {
scoped_ptr<UploadDataStream> stream(new UploadDataStream(data));
int rv = stream->FillBuf();
@@ -26,13 +23,15 @@ UploadDataStream* UploadDataStream::Create(UploadData* data, int* error_code) {
return stream.release();
}
-void UploadDataStream::DidConsume(size_t num_bytes) {
+void UploadDataStream::MarkConsumedAndFillBuffer(size_t num_bytes) {
DCHECK_LE(num_bytes, buf_len_);
DCHECK(!eof_);
- buf_len_ -= num_bytes;
- if (buf_len_)
- memmove(buf_->data(), buf_->data() + num_bytes, buf_len_);
+ if (num_bytes) {
+ buf_len_ -= num_bytes;
+ if (buf_len_)
+ memmove(buf_->data(), buf_->data() + num_bytes, buf_len_);
+ }
FillBuf();
@@ -43,25 +42,28 @@ UploadDataStream::UploadDataStream(UploadData* data)
: data_(data),
buf_(new IOBuffer(kBufSize)),
buf_len_(0),
- next_element_(data->elements()->begin()),
+ next_element_(0),
next_element_offset_(0),
next_element_remaining_(0),
- total_size_(data->GetContentLength()),
+ total_size_(data->is_chunked() ? 0 : data->GetContentLength()),
current_position_(0),
eof_(false) {
}
+UploadDataStream::~UploadDataStream() {
+}
+
int UploadDataStream::FillBuf() {
- std::vector<UploadData::Element>::iterator end =
- data_->elements()->end();
+ std::vector<UploadData::Element>& elements = *data_->elements();
- while (buf_len_ < kBufSize && next_element_ != end) {
+ while (buf_len_ < kBufSize && next_element_ < elements.size()) {
bool advance_to_next_element = false;
- UploadData::Element& element = *next_element_;
+ UploadData::Element& element = elements[next_element_];
size_t size_remaining = kBufSize - buf_len_;
- if (element.type() == UploadData::TYPE_BYTES) {
+ if (element.type() == UploadData::TYPE_BYTES ||
+ element.type() == UploadData::TYPE_CHUNK) {
const std::vector<char>& d = element.bytes();
size_t count = d.size() - next_element_offset_;
@@ -126,8 +128,12 @@ int UploadDataStream::FillBuf() {
}
}
- if (next_element_ == end && !buf_len_)
- eof_ = true;
+ if (next_element_ == elements.size() && !buf_len_) {
+ if (!data_->is_chunked() ||
+ (!elements.empty() && elements.back().is_last_chunk())) {
+ eof_ = true;
+ }
+ }
return OK;
}
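The eof() change above is the subtle part for chunked uploads: the stream reports end-of-file only after the element flagged is_last_chunk() has been appended and fully consumed, so buf_len() can legitimately be zero while eof() is still false. A hypothetical caller-side helper (not from the change) spelling out the three states a writer sees:

#include <cstddef>
#include "net/base/upload_data_stream.h"

// AfterWrite() is illustrative only:
//   eof()           -> body (or final chunk) fully sent;
//   buf_len() == 0  -> chunked upload with no chunk queued yet, so wait for
//                      ChunkCallback::OnChunkAvailable();
//   otherwise       -> write buf()/buf_len() to the socket next.
void AfterWrite(net::UploadDataStream* stream, size_t bytes_written) {
  stream->MarkConsumedAndFillBuffer(bytes_written);
  if (stream->eof())
    return;
  if (stream->buf_len() == 0)
    return;  // waiting for the next AppendChunk()
  // Otherwise issue the next socket write from stream->buf().
}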
diff --git a/net/base/upload_data_stream.h b/net/base/upload_data_stream.h
index 8583fb4..f291140 100644
--- a/net/base/upload_data_stream.h
+++ b/net/base/upload_data_stream.h
@@ -30,7 +30,12 @@ class UploadDataStream {
// Call to indicate that a portion of the stream's buffer was consumed. This
// call modifies the stream's buffer so that it contains the next segment of
// the upload data to be consumed.
- void DidConsume(size_t num_bytes);
+ void MarkConsumedAndFillBuffer(size_t num_bytes);
+
+ // Sets the callback to be invoked when new chunks are available to upload.
+ void set_chunk_callback(ChunkCallback* callback) {
+ data_->set_chunk_callback(callback);
+ }
// Returns the total size of the data stream and the current position.
// size() is not to be used to determine whether the stream has ended
@@ -39,6 +44,8 @@ class UploadDataStream {
uint64 size() const { return total_size_; }
uint64 position() const { return current_position_; }
+ bool is_chunked() const { return data_->is_chunked(); }
+
// Returns whether there is no more data to read, regardless of whether
// position < size.
bool eof() const { return eof_; }
@@ -64,8 +71,8 @@ class UploadDataStream {
scoped_refptr<IOBuffer> buf_;
size_t buf_len_;
- // Iterator to the upload element to be written to the send buffer next.
- std::vector<UploadData::Element>::iterator next_element_;
+ // Index of the upload element to be written to the send buffer next.
+ size_t next_element_;
// The byte offset into next_element_'s data buffer if the next element is
// a TYPE_BYTES element.
diff --git a/net/base/upload_data_stream_unittest.cc b/net/base/upload_data_stream_unittest.cc
index 8a21ad6..780bd23 100644
--- a/net/base/upload_data_stream_unittest.cc
+++ b/net/base/upload_data_stream_unittest.cc
@@ -50,7 +50,7 @@ TEST_F(UploadDataStreamTest, ConsumeAll) {
UploadDataStream::Create(upload_data_, NULL));
ASSERT_TRUE(stream.get());
while (!stream->eof()) {
- stream->DidConsume(stream->buf_len());
+ stream->MarkConsumedAndFillBuffer(stream->buf_len());
}
}
@@ -76,7 +76,7 @@ TEST_F(UploadDataStreamTest, FileSmallerThanLength) {
uint64 read_counter = 0;
while (!stream->eof()) {
read_counter += stream->buf_len();
- stream->DidConsume(stream->buf_len());
+ stream->MarkConsumedAndFillBuffer(stream->buf_len());
}
// UpdateDataStream will pad out the file with 0 bytes so that the HTTP
// transaction doesn't hang. Therefore we expected the full size.
diff --git a/net/http/http_request_headers.cc b/net/http/http_request_headers.cc
index 93ead01..9ce77bf 100644
--- a/net/http/http_request_headers.cc
+++ b/net/http/http_request_headers.cc
@@ -30,6 +30,7 @@ const char HttpRequestHeaders::kProxyConnection[] = "Proxy-Connection";
const char HttpRequestHeaders::kRange[] = "Range";
const char HttpRequestHeaders::kReferer[] = "Referer";
const char HttpRequestHeaders::kUserAgent[] = "User-Agent";
+const char HttpRequestHeaders::kTransferEncoding[] = "Transfer-Encoding";
HttpRequestHeaders::HeaderKeyValuePair::HeaderKeyValuePair() {
}
diff --git a/net/http/http_request_headers.h b/net/http/http_request_headers.h
index 2962ae2..ef4b60d 100644
--- a/net/http/http_request_headers.h
+++ b/net/http/http_request_headers.h
@@ -73,6 +73,7 @@ class HttpRequestHeaders {
static const char kRange[];
static const char kReferer[];
static const char kUserAgent[];
+ static const char kTransferEncoding[];
HttpRequestHeaders();
~HttpRequestHeaders();
diff --git a/net/http/http_stream_parser.cc b/net/http/http_stream_parser.cc
index 2a3fb15..2d00a26 100644
--- a/net/http/http_stream_parser.cc
+++ b/net/http/http_stream_parser.cc
@@ -43,7 +43,10 @@ HttpStreamParser::HttpStreamParser(ClientSocketHandle* connection,
DCHECK_EQ(0, read_buffer->offset());
}
-HttpStreamParser::~HttpStreamParser() {}
+HttpStreamParser::~HttpStreamParser() {
+ if (request_body_ != NULL && request_body_->is_chunked())
+ request_body_->set_chunk_callback(NULL);
+}
int HttpStreamParser::SendRequest(const std::string& request_line,
const HttpRequestHeaders& headers,
@@ -67,6 +70,8 @@ int HttpStreamParser::SendRequest(const std::string& request_line,
request_headers_ = new DrainableIOBuffer(headers_io_buf,
headers_io_buf->size());
request_body_.reset(request_body);
+ if (request_body_ != NULL && request_body_->is_chunked())
+ request_body_->set_chunk_callback(this);
io_state_ = STATE_SENDING_HEADERS;
int result = DoLoop(OK);
@@ -143,6 +148,16 @@ void HttpStreamParser::OnIOComplete(int result) {
}
}
+void HttpStreamParser::OnChunkAvailable() {
+ // This method may get called while sending the headers or body, so check
+ // before processing the new data. If we were still initializing or sending
+ // headers, we will automatically start reading the chunks once we get into
+ // STATE_SENDING_BODY so nothing to do here.
+ DCHECK(io_state_ == STATE_SENDING_HEADERS || io_state_ == STATE_SENDING_BODY);
+ if (io_state_ == STATE_SENDING_BODY)
+ OnIOComplete(0);
+}
+
int HttpStreamParser::DoLoop(int result) {
bool can_do_more = true;
do {
@@ -208,12 +223,16 @@ int HttpStreamParser::DoSendHeaders(int result) {
// We'll record the count of uncoalesced packets IFF coalescing will help,
// and otherwise we'll use an enum to tell why it won't help.
enum COALESCE_POTENTIAL {
- NO_ADVANTAGE = 0, // Coalescing won't reduce packet count.
- HEADER_ONLY = 1, // There is only a header packet (can't coalesce).
- COALESCE_POTENTIAL_MAX = 30 // Various cases of coalasced savings.
+ // Coalescing won't reduce packet count.
+ NO_ADVANTAGE = 0,
+ // There is only a header packet or we have a request body but the
+ // request body isn't available yet (can't coalesce).
+ HEADER_ONLY = 1,
+ // Various cases of coalasced savings.
+ COALESCE_POTENTIAL_MAX = 30
};
size_t coalesce = HEADER_ONLY;
- if (request_body_ != NULL) {
+ if (request_body_ != NULL && !request_body_->is_chunked()) {
const size_t kBytesPerPacket = 1430;
uint64 body_packets = (request_body_->size() + kBytesPerPacket - 1) /
kBytesPerPacket;
@@ -236,7 +255,8 @@ int HttpStreamParser::DoSendHeaders(int result) {
result = connection_->socket()->Write(request_headers_,
bytes_remaining,
&io_callback_);
- } else if (request_body_ != NULL && request_body_->size()) {
+ } else if (request_body_ != NULL &&
+ (request_body_->is_chunked() || request_body_->size())) {
io_state_ = STATE_SENDING_BODY;
result = OK;
} else {
@@ -246,13 +266,17 @@ int HttpStreamParser::DoSendHeaders(int result) {
}
int HttpStreamParser::DoSendBody(int result) {
- if (result > 0)
- request_body_->DidConsume(result);
+ request_body_->MarkConsumedAndFillBuffer(result);
if (!request_body_->eof()) {
int buf_len = static_cast<int>(request_body_->buf_len());
- result = connection_->socket()->Write(request_body_->buf(), buf_len,
- &io_callback_);
+ if (buf_len) {
+ result = connection_->socket()->Write(request_body_->buf(), buf_len,
+ &io_callback_);
+ } else {
+ // More POST data is to come hence wait for the callback.
+ result = ERR_IO_PENDING;
+ }
} else {
io_state_ = STATE_REQUEST_SENT;
}
diff --git a/net/http/http_stream_parser.h b/net/http/http_stream_parser.h
index bbd551f..5f7e943 100644
--- a/net/http/http_stream_parser.h
+++ b/net/http/http_stream_parser.h
@@ -26,7 +26,7 @@ class IOBuffer;
class SSLCertRequestInfo;
class SSLInfo;
-class HttpStreamParser {
+class HttpStreamParser : public ChunkCallback {
public:
// Any data in |read_buffer| will be used before reading from the socket
// and any data left over after parsing the stream will be put into
@@ -71,6 +71,9 @@ class HttpStreamParser {
void GetSSLCertRequestInfo(SSLCertRequestInfo* cert_request_info);
+ // ChunkCallback methods.
+ virtual void OnChunkAvailable();
+
private:
// FOO_COMPLETE states implement the second half of potentially asynchronous
// operations and don't necessarily mean that FOO is complete.
diff --git a/net/http/http_util.cc b/net/http/http_util.cc
index bf56136..0f28c83 100644
--- a/net/http/http_util.cc
+++ b/net/http/http_util.cc
@@ -674,9 +674,14 @@ void HttpUtil::BuildRequestHeaders(const HttpRequestInfo* request_info,
// Add a content length header?
if (upload_data_stream) {
- request_headers->SetHeader(
- HttpRequestHeaders::kContentLength,
- base::Uint64ToString(upload_data_stream->size()));
+ if (upload_data_stream->is_chunked()) {
+ request_headers->SetHeader(
+ HttpRequestHeaders::kTransferEncoding, "chunked");
+ } else {
+ request_headers->SetHeader(
+ HttpRequestHeaders::kContentLength,
+ base::Uint64ToString(upload_data_stream->size()));
+ }
} else if (request_info->method == "POST" || request_info->method == "PUT" ||
request_info->method == "HEAD") {
// An empty POST/PUT request still needs a content length. As for HEAD,
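With this change a chunked upload advertises Transfer-Encoding: chunked instead of Content-Length, and the body is the sequence of frames built by SetToChunk(). Roughly, such a request looks like this on the wire (illustrative values; CRLF line endings; other headers omitted; the zero-size chunk is followed by a final blank line):

POST /echo HTTP/1.1
Host: 127.0.0.1:1337
Transfer-Encoding: chunked

5
hello
5
world
0
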
diff --git a/net/spdy/spdy_http_stream.cc b/net/spdy/spdy_http_stream.cc
index 15acff7..6a6abb2 100644
--- a/net/spdy/spdy_http_stream.cc
+++ b/net/spdy/spdy_http_stream.cc
@@ -264,7 +264,7 @@ int SpdyHttpStream::OnSendBody() {
bool SpdyHttpStream::OnSendBodyComplete(int status) {
CHECK(request_body_stream_.get());
- request_body_stream_->DidConsume(status);
+ request_body_stream_->MarkConsumedAndFillBuffer(status);
return request_body_stream_->eof();
}
diff --git a/net/tools/testserver/testserver.py b/net/tools/testserver/testserver.py
index acfafae..f44fabc 100755
--- a/net/tools/testserver/testserver.py
+++ b/net/tools/testserver/testserver.py
@@ -593,6 +593,26 @@ class TestPageHandler(BasePageHandler):
return True
+ def ReadRequestBody(self):
+ """This function reads the body of the current HTTP request, handling
+ both plain and chunked transfer encoded requests."""
+
+ if self.headers.getheader('transfer-encoding') != 'chunked':
+ length = int(self.headers.getheader('content-length'))
+ return self.rfile.read(length)
+
+ # Read the request body as chunks.
+ body = ""
+ while True:
+ line = self.rfile.readline()
+ length = int(line, 16)
+ if length == 0:
+ self.rfile.readline()
+ break
+ body += self.rfile.read(length)
+ self.rfile.read(2)
+ return body
+
def EchoHandler(self):
"""This handler just echoes back the payload of the request, for testing
form submission."""
@@ -603,9 +623,7 @@ class TestPageHandler(BasePageHandler):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
- length = int(self.headers.getheader('content-length'))
- request = self.rfile.read(length)
- self.wfile.write(request)
+ self.wfile.write(self.ReadRequestBody())
return True
def EchoTitleHandler(self):
@@ -617,8 +635,7 @@ class TestPageHandler(BasePageHandler):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
- length = int(self.headers.getheader('content-length'))
- request = self.rfile.read(length)
+ request = self.ReadRequestBody()
self.wfile.write('<html><head><title>')
self.wfile.write(request)
self.wfile.write('</title></head></html>')
@@ -642,8 +659,7 @@ class TestPageHandler(BasePageHandler):
'<h1>Request Body:</h1><pre>')
if self.command == 'POST' or self.command == 'PUT':
- length = int(self.headers.getheader('content-length'))
- qs = self.rfile.read(length)
+ qs = self.ReadRequestBody()
params = cgi.parse_qs(qs, keep_blank_values=1)
for param in params:
@@ -745,7 +761,7 @@ class TestPageHandler(BasePageHandler):
# Consume a request body if present.
if self.command == 'POST' or self.command == 'PUT' :
- self.rfile.read(int(self.headers.getheader('content-length')))
+ self.ReadRequestBody()
_, _, url_path, _, query, _ = urlparse.urlparse(self.path)
sub_path = url_path[len(prefix):]
@@ -1262,8 +1278,7 @@ class TestPageHandler(BasePageHandler):
if not self._ShouldHandleRequest("/device_management"):
return False
- length = int(self.headers.getheader('content-length'))
- raw_request = self.rfile.read(length)
+ raw_request = self.ReadRequestBody()
if not self.server._device_management_handler:
import device_management
@@ -1324,8 +1339,7 @@ class SyncPageHandler(BasePageHandler):
if not self._ShouldHandleRequest(test_name):
return False
- length = int(self.headers.getheader('content-length'))
- raw_request = self.rfile.read(length)
+ raw_request = self.ReadRequestBody()
http_response, raw_reply = self.server.HandleCommand(
self.path, raw_request)
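ReadRequestBody() above dechunks by reading a hex size line, that many payload bytes, and a trailing CRLF, stopping at a zero-size chunk. A rough C++ counterpart for comparison (ReadChunkedBody is illustrative only, not part of the change):

#include <cstdlib>
#include <istream>
#include <string>

// Reads "<hex size>\r\n<payload>\r\n" frames from |in| until the zero-size
// chunk that terminates the body; returns the concatenated payload.
std::string ReadChunkedBody(std::istream& in) {
  std::string body;
  std::string size_line;
  while (std::getline(in, size_line)) {              // e.g. "1a\r"
    size_t length = std::strtoul(size_line.c_str(), NULL, 16);
    if (length == 0) {
      std::getline(in, size_line);                   // CRLF after last chunk
      break;
    }
    std::string chunk(length, '\0');
    in.read(&chunk[0], length);                      // chunk payload
    body += chunk;
    in.ignore(2);                                    // CRLF ending this chunk
  }
  return body;
}

Fed the byte sequence "3\r\nabc\r\n0\r\n\r\n" it returns "abc", matching what the Python handler echoes back.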
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index d901d83..300c9c6 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -160,6 +160,27 @@ void URLRequest::AppendFileRangeToUpload(
expected_modification_time);
}
+void URLRequest::EnableChunkedUpload() {
+ DCHECK(!upload_ || upload_->is_chunked());
+ if (!upload_) {
+ upload_ = new UploadData();
+ upload_->set_is_chunked(true);
+ }
+}
+
+void URLRequest::AppendChunkToUpload(const char* bytes, int bytes_len) {
+ DCHECK(upload_);
+ DCHECK(upload_->is_chunked());
+ DCHECK_GT(bytes_len, 0);
+ upload_->AppendChunk(bytes, bytes_len);
+}
+
+void URLRequest::MarkEndOfChunks() {
+ DCHECK(upload_);
+ DCHECK(upload_->is_chunked());
+ upload_->AppendChunk(NULL, 0);
+}
+
void URLRequest::set_upload(net::UploadData* upload) {
upload_ = upload;
}
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index f257ed6..3f399a6 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -320,6 +320,20 @@ class URLRequest : public base::NonThreadSafe {
AppendFileRangeToUpload(file_path, 0, kuint64max, base::Time());
}
+ // Indicates that the request body should be sent using chunked transfer
+ // encoding. This method may only be called before Start() is called.
+ void EnableChunkedUpload();
+
+ // Appends the given bytes to the request's upload data to be sent
+ // immediately via chunked transfer encoding. When all data has been sent,
+ // call MarkEndOfChunks() to indicate the end of upload data.
+ //
+ // This method may be called only after calling EnableChunkedUpload().
+ void AppendChunkToUpload(const char* bytes, int bytes_len);
+
+ // Indicates the end of a chunked transfer encoded request body.
+ void MarkEndOfChunks();
+
// Set the upload data directly.
void set_upload(net::UploadData* upload);
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
index f920fa4..ce02eed 100644
--- a/net/url_request/url_request_unittest.cc
+++ b/net/url_request/url_request_unittest.cc
@@ -177,6 +177,31 @@ class URLRequestTestHTTP : public URLRequestTest {
delete[] uploadBytes;
}
+ void AddChunksToUpload(TestURLRequest* r) {
+ r->AppendChunkToUpload("a", 1);
+ r->AppendChunkToUpload("bcd", 3);
+ r->AppendChunkToUpload("this is a longer chunk than before.", 35);
+ r->AppendChunkToUpload("\r\n\r\n", 4);
+ r->AppendChunkToUpload("0", 1);
+ r->AppendChunkToUpload("2323", 4);
+ r->MarkEndOfChunks();
+ }
+
+ void VerifyReceivedDataMatchesChunks(TestURLRequest* r, TestDelegate* d) {
+ // This should match the chunks sent by AddChunksToUpload().
+ const char* expected_data =
+ "abcdthis is a longer chunk than before.\r\n\r\n02323";
+
+ ASSERT_EQ(1, d->response_started_count()) << "request failed: " <<
+ (int) r->status().status() << ", os error: " << r->status().os_error();
+
+ EXPECT_FALSE(d->received_data_before_response());
+
+ ASSERT_EQ(strlen(expected_data), static_cast<size_t>(d->bytes_received()));
+ EXPECT_EQ(0, memcmp(d->data_received().c_str(), expected_data,
+ strlen(expected_data)));
+ }
+
net::TestServer test_server_;
};
@@ -639,6 +664,43 @@ TEST_F(URLRequestTestHTTP, PostFileTest) {
}
}
+TEST_F(URLRequestTestHTTP, TestPostChunkedDataBeforeStart) {
+ ASSERT_TRUE(test_server_.Start());
+
+ TestDelegate d;
+ {
+ TestURLRequest r(test_server_.GetURL("echo"), &d);
+ r.EnableChunkedUpload();
+ r.set_method("POST");
+ AddChunksToUpload(&r);
+ r.Start();
+ EXPECT_TRUE(r.is_pending());
+
+ MessageLoop::current()->Run();
+
+ VerifyReceivedDataMatchesChunks(&r, &d);
+ }
+}
+
+TEST_F(URLRequestTestHTTP, TestPostChunkedDataAfterStart) {
+ ASSERT_TRUE(test_server_.Start());
+
+ TestDelegate d;
+ {
+ TestURLRequest r(test_server_.GetURL("echo"), &d);
+ r.EnableChunkedUpload();
+ r.set_method("POST");
+ r.Start();
+ EXPECT_TRUE(r.is_pending());
+
+ MessageLoop::current()->RunAllPending();
+ AddChunksToUpload(&r);
+ MessageLoop::current()->Run();
+
+ VerifyReceivedDataMatchesChunks(&r, &d);
+ }
+}
+
TEST_F(URLRequestTest, AboutBlankTest) {
TestDelegate d;
{