author     satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-02-08 06:07:00 +0000
committer  satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-02-08 06:07:00 +0000
commit     0ab2e24c5fd9b65352e54456954f6a9406c2f8df
tree       245bc5d1be6c3119e8e00c804cbbe5cb9d0c11c3  /net/http/http_stream_parser.cc
parent     631a595935c53d14158fd5ec2e9348a249a34957
net: Rework UploadDataStream API by introducing Read().
Replace buf()+buf_len()+MarkConsumedAndFillBuffer()+GetBufferSize() with
a single function Read(). This is done by externalizing the read buffer.
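For illustration, here is a minimal stand-alone analog of the reworked, external-buffer API. The UploadSource class and the plain char* buffer are hypothetical stand-ins for UploadDataStream and IOBuffer, not the actual Chromium code:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>
    #include <string>

    // Hypothetical stand-in for UploadDataStream after the rework: the caller
    // owns the buffer, and Read() simply copies into it.
    class UploadSource {
     public:
      explicit UploadSource(std::string data) : data_(std::move(data))  {}

      // Reads up to |buf_len| bytes into the caller-owned |buf|; returns the
      // number of bytes read, or 0 once the end of the stream is reached.
      int Read(char* buf, int buf_len) {
        const size_t n = std::min<size_t>(buf_len, data_.size() - offset_);
        std::memcpy(buf, data_.data() + offset_, n);
        offset_ += n;
        return static_cast<int>(n);
      }

      bool IsEOF() const { return offset_ == data_.size(); }

     private:
      std::string data_;
      size_t offset_ = 0;
    };

Because the caller drives the loop with its own buffer, nothing has to be compacted inside the stream between reads.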
As a byproduct, the use of memmove() in handling SPDY HTTP requests is
significantly reduced. There, data is written one kMaxSpdyFrameChunkSize
(about 2.8KB) at a time, and each write caused MarkConsumedAndFillBuffer()
to memmove() the remaining ~13.2KB in the internal buffer back to the
beginning. In other words, roughly 13.2KB of data was moved for every
2.8KB chunk written.
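The pattern described above looks roughly like the following sketch. The class, the member names, and the 16KB size are illustrative, taken from the description rather than the real UploadDataStream internals:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Sketch of the old internal-buffer refill: after the caller consumes
    // |bytes| from the front, the remainder is compacted to the beginning so
    // the tail can be refilled.
    class InternalBufferStream {
     public:
      InternalBufferStream() : buffer_(16 * 1024), buffer_len_(buffer_.size()) {}

      void MarkConsumedAndFill(size_t bytes) {
        // With a 16KB buffer and ~2.8KB consumed per SPDY frame, this moves
        // ~13.2KB of data on every call.
        std::memmove(buffer_.data(), buffer_.data() + bytes,
                     buffer_len_ - bytes);
        buffer_len_ -= bytes;
        // ... refill the tail of |buffer_| from the underlying data source ...
      }

     private:
      std::vector<char> buffer_;
      size_t buffer_len_;
    };

With the externalized buffer, the caller reads directly into its own IOBuffer and consumes exactly what it asked for, so no compaction is needed.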
Along the way, UploadDataStream::IsOnLastChunk(), which is no longer
necessary, is removed. Some TODO(satish)s have also been addressed.
This is in preparation for adding an asynchronous API to UploadDataStream,
which is why Read() takes an IOBuffer* rather than a char*.
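The lifetime argument behind that choice can be sketched in plain C++, with std::shared_ptr standing in for scoped_refptr<IOBuffer> and ReadAsync a hypothetical helper; this is not Chromium code:

    #include <functional>
    #include <memory>
    #include <vector>

    using Buffer = std::vector<char>;

    // Toy async reader: it holds a reference to the buffer until the read
    // completes, so the buffer cannot go away mid-read. A real implementation
    // would post the completion to a message loop instead of running it inline.
    void ReadAsync(std::shared_ptr<Buffer> buf,
                   std::function<void(int bytes_read)> on_done) {
      on_done(static_cast<int>(buf->size()));  // Pretend the read finished.
    }

    void StartUpload() {
      auto buf = std::make_shared<Buffer>(16 * 1024);
      // The lambda captures |buf| by value, keeping it alive until completion.
      // A raw char* handed to an asynchronous reader could dangle instead.
      ReadAsync(buf, [buf](int bytes_read) {
        // ... write buf->data()[0 .. bytes_read) to the socket ...
      });
    }

A refcounted buffer lets the stream keep the memory alive across an asynchronous read without caring who allocated it.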
BUG=72001
TEST=try bots
Review URL: https://chromiumcodereview.appspot.com/9317055
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@120946 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/http/http_stream_parser.cc')
-rw-r--r--  net/http/http_stream_parser.cc  118
1 file changed, 67 insertions(+), 51 deletions(-)
diff --git a/net/http/http_stream_parser.cc b/net/http/http_stream_parser.cc
index 72f4ac9..6b81a66 100644
--- a/net/http/http_stream_parser.cc
+++ b/net/http/http_stream_parser.cc
@@ -21,7 +21,8 @@
 namespace {
 
-static const size_t kMaxMergedHeaderAndBodySize = 1400;
+const size_t kMaxMergedHeaderAndBodySize = 1400;
+const size_t kRequestBodyBufferSize = 1 << 14;  // 16KB
 
 std::string GetResponseHeaderLines(const net::HttpResponseHeaders& headers) {
   std::string raw_headers = headers.raw_headers();
@@ -181,7 +182,6 @@ HttpStreamParser::HttpStreamParser(ClientSocketHandle* connection,
           io_callback_(
               base::Bind(&HttpStreamParser::OnIOComplete,
                          base::Unretained(this)))),
-      chunk_length_without_encoding_(0),
       sent_last_chunk_(false) {
 }
 
@@ -220,12 +220,15 @@ int HttpStreamParser::SendRequest(const std::string& request_line,
   std::string request = request_line + headers.ToString();
   request_body_.reset(request_body);
-  if (request_body_ != NULL && request_body_->is_chunked()) {
-    request_body_->set_chunk_callback(this);
-    // The chunk buffer is guaranteed to be large enough to hold the encoded
-    // chunk.
-    chunk_buf_ = new SeekableIOBuffer(UploadDataStream::GetBufferSize() +
-                                      kChunkHeaderFooterSize);
+  if (request_body_ != NULL) {
+    request_body_buf_ = new SeekableIOBuffer(kRequestBodyBufferSize);
+    if (request_body_->is_chunked()) {
+      request_body_->set_chunk_callback(this);
+      // The chunk buffer is adjusted to guarantee that |request_body_buf_|
+      // is large enough to hold the encoded chunk.
+      chunk_buf_ = new IOBufferWithSize(kRequestBodyBufferSize -
+                                        kChunkHeaderFooterSize);
+    }
   }
 
   io_state_ = STATE_SENDING_HEADERS;
@@ -242,19 +245,19 @@ int HttpStreamParser::SendRequest(const std::string& request_line,
     request_headers_ = new DrainableIOBuffer(
         merged_request_headers_and_body, merged_size);
 
-    char *buf = request_headers_->data();
-    memcpy(buf, request.data(), request.size());
-    buf += request.size();
+    memcpy(request_headers_->data(), request.data(), request.size());
+    request_headers_->DidConsume(request.size());
 
     size_t todo = request_body_->size();
     while (todo) {
-      size_t buf_len = request_body_->buf_len();
-      memcpy(buf, request_body_->buf()->data(), buf_len);
-      todo -= buf_len;
-      buf += buf_len;
-      request_body_->MarkConsumedAndFillBuffer(buf_len);
+      int consumed = request_body_->Read(request_headers_, todo);
+      DCHECK_GT(consumed, 0);  // Read() won't fail if not chunked.
+      request_headers_->DidConsume(consumed);
+      todo -= consumed;
     }
-    DCHECK(request_body_->eof());
+    DCHECK(request_body_->IsEOF());
+    // Reset the offset, so the buffer can be read from the beginning.
+    request_headers_->SetOffset(0);
 
     did_merge = true;
   }
@@ -428,8 +431,8 @@ int HttpStreamParser::DoSendHeaders(int result) {
     io_state_ = STATE_SENDING_CHUNKED_BODY;
     result = OK;
   } else if (request_body_ != NULL && request_body_->size() > 0 &&
-             // !eof() indicates that the body wasn't merged.
-             !request_body_->eof()) {
+             // !IsEOF() indicates that the body wasn't merged.
+             !request_body_->IsEOF()) {
     io_state_ = STATE_SENDING_NON_CHUNKED_BODY;
     result = OK;
   } else {
@@ -442,11 +445,11 @@ int HttpStreamParser::DoSendChunkedBody(int result) {
   // |result| is the number of bytes sent from the last call to
   // DoSendChunkedBody(), or 0 (i.e. OK) the first time.
 
-  // Send the remaining data in the chunk buffer.
-  chunk_buf_->DidConsume(result);
-  if (chunk_buf_->BytesRemaining() > 0) {
-    return connection_->socket()->Write(chunk_buf_,
-                                        chunk_buf_->BytesRemaining(),
+  // Send the remaining data in the request body buffer.
+  request_body_buf_->DidConsume(result);
+  if (request_body_buf_->BytesRemaining() > 0) {
+    return connection_->socket()->Write(request_body_buf_,
+                                        request_body_buf_->BytesRemaining(),
                                         io_callback_);
   }
 
@@ -455,32 +458,33 @@ int HttpStreamParser::DoSendChunkedBody(int result) {
     return OK;
   }
 
-  // |chunk_length_without_encoding_| is 0 when DoSendBody() is first
-  // called, hence the first call to MarkConsumedAndFillBuffer() is a noop.
-  request_body_->MarkConsumedAndFillBuffer(chunk_length_without_encoding_);
-  chunk_length_without_encoding_ = 0;
-
-  if (request_body_->eof()) {
-    chunk_buf_->Clear();
-    const int chunk_length = EncodeChunk(
-        base::StringPiece(), chunk_buf_->data(), chunk_buf_->capacity());
-    chunk_buf_->DidAppend(chunk_length);
+  const int consumed = request_body_->Read(chunk_buf_, chunk_buf_->size());
+  if (consumed == 0) {  // Reached the end.
+    DCHECK(request_body_->IsEOF());
+    request_body_buf_->Clear();
+    const int chunk_length = EncodeChunk(base::StringPiece(),
+                                         request_body_buf_->data(),
+                                         request_body_buf_->capacity());
+    request_body_buf_->DidAppend(chunk_length);
     sent_last_chunk_ = true;
-  } else if (request_body_->buf_len() > 0) {
+  } else if (consumed > 0) {
     // Encode and send the buffer as 1 chunk.
-    const base::StringPiece payload(request_body_->buf()->data(),
-                                    request_body_->buf_len());
-    chunk_buf_->Clear();
-    const int chunk_length = EncodeChunk(
-        payload, chunk_buf_->data(), chunk_buf_->capacity());
-    chunk_buf_->DidAppend(chunk_length);
-    chunk_length_without_encoding_ = payload.size();
-  } else {
-    // Nothing to send. More POST data is yet to come?
+    const base::StringPiece payload(chunk_buf_->data(), consumed);
+    request_body_buf_->Clear();
+    const int chunk_length = EncodeChunk(payload,
+                                         request_body_buf_->data(),
+                                         request_body_buf_->capacity());
+    request_body_buf_->DidAppend(chunk_length);
+  } else if (consumed == ERR_IO_PENDING) {
+    // Nothing to send. More POST data is yet to come.
    return ERR_IO_PENDING;
+  } else {
+    // There won't be other errors.
+    NOTREACHED();
   }
 
-  return connection_->socket()->Write(chunk_buf_, chunk_buf_->BytesRemaining(),
+  return connection_->socket()->Write(request_body_buf_,
+                                      request_body_buf_->BytesRemaining(),
                                       io_callback_);
 }
 
@@ -488,15 +492,27 @@ int HttpStreamParser::DoSendNonChunkedBody(int result) {
   // |result| is the number of bytes sent from the last call to
   // DoSendNonChunkedBody(), or 0 (i.e. OK) the first time.
 
-  // The first call to MarkConsumedAndFillBuffer() is a noop as |result| is 0.
-  request_body_->MarkConsumedAndFillBuffer(result);
+  // Send the remaining data in the request body buffer.
+  request_body_buf_->DidConsume(result);
+  if (request_body_buf_->BytesRemaining() > 0) {
+    return connection_->socket()->Write(request_body_buf_,
+                                        request_body_buf_->BytesRemaining(),
+                                        io_callback_);
+  }
 
-  if (!request_body_->eof()) {
-    int buf_len = static_cast<int>(request_body_->buf_len());
-    result = connection_->socket()->Write(request_body_->buf(), buf_len,
+  request_body_buf_->Clear();
+  const int consumed = request_body_->Read(request_body_buf_,
+                                           request_body_buf_->capacity());
+  if (consumed == 0) {  // Reached the end.
+    io_state_ = STATE_REQUEST_SENT;
+  } else if (consumed > 0) {
+    request_body_buf_->DidAppend(consumed);
+    result = connection_->socket()->Write(request_body_buf_,
+                                          request_body_buf_->BytesRemaining(),
                                           io_callback_);
   } else {
-    io_state_ = STATE_REQUEST_SENT;
+    // UploadDataStream::Read() won't fail if not chunked.
+    NOTREACHED();
   }
   return result;
 }
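For reference, the framing that EncodeChunk() writes into |request_body_buf_| follows standard HTTP/1.1 chunked transfer coding. A minimal stand-alone sketch of that framing (EncodeChunkSketch is a hypothetical function, not the Chromium implementation, which additionally respects the kChunkHeaderFooterSize bound):

    #include <cstdio>
    #include <string>

    // Frames one payload as an HTTP/1.1 chunk: hex length, CRLF, data, CRLF.
    // An empty payload yields "0\r\n\r\n", the last-chunk terminator emitted
    // by the sent_last_chunk_ path above.
    std::string EncodeChunkSketch(const std::string& payload) {
      char header[16];
      std::snprintf(header, sizeof(header), "%X\r\n",
                    static_cast<unsigned>(payload.size()));
      return std::string(header) + payload + "\r\n";
    }

    // EncodeChunkSketch("Mozilla") == "7\r\nMozilla\r\n"
    // EncodeChunkSketch("")        == "0\r\n\r\n"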