| author | satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-02-08 06:07:00 +0000 |
|---|---|---|
| committer | satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-02-08 06:07:00 +0000 |
| commit | 0ab2e24c5fd9b65352e54456954f6a9406c2f8df (patch) | |
| tree | 245bc5d1be6c3119e8e00c804cbbe5cb9d0c11c3 /net | |
| parent | 631a595935c53d14158fd5ec2e9348a249a34957 (diff) | |
net: Rework UploadDataStream API by introducing Read().
Replace buf() + buf_len() + MarkConsumedAndFillBuffer() + GetBufferSize() with
a single Read() function. This is done by externalizing the read buffer: the
caller now supplies the buffer that Read() fills.
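For reference, the interface change can be summarized from the net/base/upload_data_stream.h hunk below; the comments are paraphrased for this summary and are not part of the header itself.

```cpp
// Old interface (removed): the stream owns a fixed 16KB internal buffer and
// the caller drains it via MarkConsumedAndFillBuffer(), which memmoves the
// unconsumed remainder to the front of the buffer before refilling it.
IOBuffer* buf() const;                             // stream-owned buffer
size_t buf_len() const;                            // bytes currently in buf()
static size_t GetBufferSize();                     // fixed buffer size (16384)
void MarkConsumedAndFillBuffer(size_t num_bytes);  // shift + refill

// New interface: the caller supplies the buffer. Read() returns the number of
// bytes copied, 0 once the stream is exhausted, or ERR_IO_PENDING for a
// chunked upload that has no data available yet.
int Read(IOBuffer* buf, int buf_len);
bool IsEOF() const;                                // all data consumed
```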
As a byproduct, the use of memmove() in the handling of SPDY HTTP requests is
significantly reduced. There, data is written kMaxSpdyFrameChunkSize (about
2.8KB) at a time, and each write caused MarkConsumedAndFillBuffer() to
memmove() the remaining ~13.2KB of the 16KB internal buffer back to its
beginning. In other words, about 13.2KB of data was moved for every 2.8KB
chunk written.
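To make that cost concrete, here is a minimal standalone sketch (not Chromium code) that models the old buffer management; the buffer and chunk sizes are the approximate values mentioned above, and the refill of the buffer from the upload data is elided.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const size_t kBufferSize = 16384;         // old internal UploadDataStream buffer (~16KB)
  const size_t kSpdyChunk = 2800;           // roughly kMaxSpdyFrameChunkSize (~2.8KB)
  std::vector<char> buf(kBufferSize, 'x');  // stands in for the internal IOBuffer
  size_t buf_len = kBufferSize;             // buffer starts full
  size_t bytes_memmoved = 0;

  // Old pattern: after each ~2.8KB write, MarkConsumedAndFillBuffer() shifts
  // the unsent remainder to the front of the buffer and tops it up again.
  for (int chunk = 0; chunk < 5; ++chunk) {
    const size_t consumed = kSpdyChunk;
    buf_len -= consumed;
    std::memmove(buf.data(), buf.data() + consumed, buf_len);  // ~13.2KB moved
    bytes_memmoved += buf_len;
    buf_len = kBufferSize;  // refill from the upload data (elided here)
  }
  std::printf("bytes memmoved for 5 chunks: %zu\n", bytes_memmoved);
  // With the new Read(IOBuffer*, int), the SPDY code reads directly into a
  // buffer sized kMaxSpdyFrameChunkSize, so no memmove is needed at all.
  return 0;
}
```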
Along the way, UploadDataStream::IsOnLastChunk() is removed, as it is no
longer necessary. Some TODO(satish)s have also been addressed.
This is also in preparation for adding an asynchronous API to
UploadDataStream, which is why Read() takes an IOBuffer* rather than a char*.
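The consumer-side pattern then looks like the updated ConsumeAll test in net/base/upload_data_stream_unittest.cc (see the diff below); the snippet assumes an already-initialized UploadDataStream* named stream plus Chromium's scoped_refptr/IOBuffer types, so it is illustrative rather than standalone.

```cpp
const size_t kTestBufferSize = 1 << 14;  // 16KB caller-owned scratch buffer.
scoped_refptr<IOBuffer> buf = new IOBuffer(kTestBufferSize);
while (!stream->IsEOF()) {
  const int bytes_read = stream->Read(buf, kTestBufferSize);
  // A non-chunked upload never fails here; a chunked upload may instead
  // return ERR_IO_PENDING when the next chunk has not been appended yet.
  DCHECK_GE(bytes_read, 0);
  // ... write |bytes_read| bytes starting at buf->data() to the socket ...
}
DCHECK(stream->IsEOF());
```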
BUG=72001
TEST=try bots
Review URL: https://chromiumcodereview.appspot.com/9317055
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@120946 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/base/upload_data_stream.cc | 117 |
| -rw-r--r-- | net/base/upload_data_stream.h | 62 |
| -rw-r--r-- | net/base/upload_data_stream_unittest.cc | 23 |
| -rw-r--r-- | net/http/http_stream_parser.cc | 118 |
| -rw-r--r-- | net/http/http_stream_parser.h | 4 |
| -rw-r--r-- | net/spdy/spdy_http_stream.cc | 59 |
| -rw-r--r-- | net/spdy/spdy_http_stream.h | 8 |
| -rw-r--r-- | net/spdy/spdy_network_transaction_unittest.cc | 7 |
| -rw-r--r-- | net/spdy/spdy_stream.h | 3 |
9 files changed, 205 insertions, 196 deletions
diff --git a/net/base/upload_data_stream.cc b/net/base/upload_data_stream.cc index 550a117..937ee09 100644 --- a/net/base/upload_data_stream.cc +++ b/net/base/upload_data_stream.cc @@ -13,9 +13,18 @@ namespace net { -const size_t UploadDataStream::kBufferSize = 16384; bool UploadDataStream::merge_chunks_ = true; +UploadDataStream::UploadDataStream(UploadData* upload_data) + : upload_data_(upload_data), + element_index_(0), + element_offset_(0), + element_file_bytes_remaining_(0), + total_size_(0), + current_position_(0), + initialized_successfully_(false) { +} + UploadDataStream::~UploadDataStream() { } @@ -23,56 +32,43 @@ int UploadDataStream::Init() { DCHECK(!initialized_successfully_); total_size_ = upload_data_->GetContentLength(); - const int result = FillBuffer(); - initialized_successfully_ = (result == OK); - return result; -} -// static -size_t UploadDataStream::GetBufferSize() { - return kBufferSize; -} - -void UploadDataStream::MarkConsumedAndFillBuffer(size_t num_bytes) { - DCHECK(initialized_successfully_); - DCHECK_LE(num_bytes, buf_len_); - DCHECK(!eof_); - - if (num_bytes) { - buf_len_ -= num_bytes; - // Move the remaining data to the beginning. - if (buf_len_ > 0) - memmove(buf_->data(), buf_->data() + num_bytes, buf_len_); + // If the underlying file has been changed and the expected file + // modification time is set, treat it as error. Note that the expected + // modification time from WebKit is based on time_t precision. So we + // have to convert both to time_t to compare. This check is used for + // sliced files. + const std::vector<UploadData::Element>& elements = *upload_data_->elements(); + for (size_t i = 0; i < elements.size(); ++i) { + const UploadData::Element& element = elements[i]; + if (element.type() == UploadData::TYPE_FILE && + !element.expected_file_modification_time().is_null()) { + // Temporarily allow until fix: http://crbug.com/72001. + base::ThreadRestrictions::ScopedAllowIO allow_io; + base::PlatformFileInfo info; + if (file_util::GetFileInfo(element.file_path(), &info) && + element.expected_file_modification_time().ToTimeT() != + info.last_modified.ToTimeT()) { + return ERR_UPLOAD_FILE_CHANGED; + } + } } - FillBuffer(); - - current_position_ += num_bytes; -} - -UploadDataStream::UploadDataStream(UploadData* upload_data) - : upload_data_(upload_data), - buf_(new IOBuffer(kBufferSize)), - buf_len_(0), - element_index_(0), - element_offset_(0), - element_file_bytes_remaining_(0), - total_size_(0), - current_position_(0), - eof_(false), - initialized_successfully_(false) { + initialized_successfully_ = true; + return OK; } -int UploadDataStream::FillBuffer() { +int UploadDataStream::Read(IOBuffer* buf, int buf_len) { std::vector<UploadData::Element>& elements = *upload_data_->elements(); - while (buf_len_ < kBufferSize && element_index_ < elements.size()) { + int bytes_copied = 0; + while (bytes_copied < buf_len && element_index_ < elements.size()) { bool advance_to_next_element = false; // This is not const as GetContentLength() is not const. UploadData::Element& element = elements[element_index_]; - const size_t free_buffer_space = kBufferSize - buf_len_; + const size_t free_buffer_space = buf_len - bytes_copied; if (element.type() == UploadData::TYPE_BYTES || element.type() == UploadData::TYPE_CHUNK) { const std::vector<char>& element_data = element.bytes(); @@ -86,10 +82,9 @@ int UploadDataStream::FillBuffer() { // the address of an element in |element_data| and that will throw an // exception if |element_data| is an empty vector. 
if (num_bytes_to_copy > 0) { - memcpy(buf_->data() + buf_len_, - &element_data[element_offset_], + memcpy(buf->data() + bytes_copied, &element_data[element_offset_], num_bytes_to_copy); - buf_len_ += num_bytes_to_copy; + bytes_copied += num_bytes_to_copy; element_offset_ += num_bytes_to_copy; } @@ -102,17 +97,6 @@ int UploadDataStream::FillBuffer() { // Open the file of the current element if not yet opened. if (!element_file_stream_.get()) { - // If the underlying file has been changed, treat it as error. - // Note that the expected modification time from WebKit is based on - // time_t precision. So we have to convert both to time_t to compare. - if (!element.expected_file_modification_time().is_null()) { - base::PlatformFileInfo info; - if (file_util::GetFileInfo(element.file_path(), &info) && - element.expected_file_modification_time().ToTimeT() != - info.last_modified.ToTimeT()) { - return ERR_UPLOAD_FILE_CHANGED; - } - } element_file_bytes_remaining_ = element.GetContentLength(); element_file_stream_.reset(element.NewFileStreamForReading()); } @@ -128,7 +112,7 @@ int UploadDataStream::FillBuffer() { // missing or not readable. if (element_file_stream_.get()) { num_bytes_consumed = - element_file_stream_->Read(buf_->data() + buf_len_, + element_file_stream_->Read(buf->data() + bytes_copied, num_bytes_to_read, CompletionCallback()); } @@ -136,10 +120,10 @@ int UploadDataStream::FillBuffer() { // If there's less data to read than we initially observed, then // pad with zero. Otherwise the server will hang waiting for the // rest of the data. - memset(buf_->data() + buf_len_, 0, num_bytes_to_read); + memset(buf->data() + bytes_copied, 0, num_bytes_to_read); num_bytes_consumed = num_bytes_to_read; } - buf_len_ += num_bytes_consumed; + bytes_copied += num_bytes_consumed; element_file_bytes_remaining_ -= num_bytes_consumed; } @@ -156,9 +140,11 @@ int UploadDataStream::FillBuffer() { break; } - eof_ = IsEOF(); + current_position_ += bytes_copied; + if (is_chunked() && !IsEOF() && bytes_copied == 0) + return ERR_IO_PENDING; - return OK; + return bytes_copied; } void UploadDataStream::AdvanceToNextElement() { @@ -176,8 +162,8 @@ void UploadDataStream::AdvanceToNextElement() { bool UploadDataStream::IsEOF() const { const std::vector<UploadData::Element>& elements = *upload_data_->elements(); - // Check if all elements are consumed and the buffer is empty. - if (element_index_ == elements.size() && !buf_len_) { + // Check if all elements are consumed. + if (element_index_ == elements.size()) { // If the upload data is chunked, check if the last element is the // last chunk. if (!upload_data_->is_chunked() || @@ -188,17 +174,6 @@ bool UploadDataStream::IsEOF() const { return false; } -bool UploadDataStream::IsOnLastChunk() const { - DCHECK(initialized_successfully_); - - const std::vector<UploadData::Element>& elements = *upload_data_->elements(); - DCHECK(upload_data_->is_chunked()); - return (eof_ || - (!elements.empty() && - element_index_ == elements.size() && - elements.back().is_last_chunk())); -} - bool UploadDataStream::IsInMemory() const { DCHECK(initialized_successfully_); diff --git a/net/base/upload_data_stream.h b/net/base/upload_data_stream.h index 47c188c..ad70b09 100644 --- a/net/base/upload_data_stream.h +++ b/net/base/upload_data_stream.h @@ -29,23 +29,20 @@ class NET_EXPORT UploadDataStream { // files) and the target file is changed. int Init(); - // Returns the stream's buffer. - IOBuffer* buf() const { return buf_; } - // Returns the length of the data in the stream's buffer. 
- size_t buf_len() const { return buf_len_; } - - // TODO(satish): We should ideally have UploadDataStream expose a Read() - // method which returns data in a caller provided IOBuffer. That would do away - // with this function and make the interface cleaner as well with less memmove - // calls. + // Reads up to |buf_len| bytes from the upload data stream to |buf|. The + // number of bytes read is returned. Partial reads are allowed. Zero is + // returned on a call to Read when there are no remaining bytes in the + // stream, and IsEof() will return true hereafter. // - // Returns the size of the stream's buffer pointed by buf(). - static size_t GetBufferSize(); - - // Call to indicate that a portion of the stream's buffer was consumed. This - // call modifies the stream's buffer so that it contains the next segment of - // the upload data to be consumed. - void MarkConsumedAndFillBuffer(size_t num_bytes); + // If there's less data to read than we initially observed (i.e. the actual + // upload data is smaller than size()), zeros are padded to ensure that + // size() bytes can be read, which can happen for TYPE_FILE payloads. + // + // If the upload data stream is chunked (i.e. is_chunked() is true), + // ERR_IO_PENDING is returned to indicate there is nothing to read at the + // moment, but more data to come at a later time. If not chunked, reads + // won't fail. + int Read(IOBuffer* buf, int buf_len); // Sets the callback to be invoked when new chunks are available to upload. void set_chunk_callback(ChunkCallback* callback) { @@ -61,15 +58,9 @@ class NET_EXPORT UploadDataStream { bool is_chunked() const { return upload_data_->is_chunked(); } - // Returns whether there is no more data to read, regardless of whether - // position < size. - bool eof() const { return eof_; } - - // Returns whether the data available in buf() includes the last chunk in a - // chunked data stream. This method returns true once the final chunk has been - // placed in the IOBuffer returned by buf(), in contrast to eof() which - // returns true only after the data in buf() has been consumed. - bool IsOnLastChunk() const; + // Returns true if all data has been consumed from this upload data + // stream. + bool IsEOF() const; // Returns true if the upload data in the stream is entirely in memory. bool IsInMemory() const; @@ -78,27 +69,11 @@ class NET_EXPORT UploadDataStream { static void set_merge_chunks(bool merge) { merge_chunks_ = merge; } private: - // Fills the buffer with any remaining data and sets eof_ if there was nothing - // left to fill the buffer with. - // Returns OK if the operation succeeds. Otherwise error code is returned. - int FillBuffer(); - // Advances to the next element. Updates the internal states. void AdvanceToNextElement(); - // Returns true if all data has been consumed from this upload data - // stream. - bool IsEOF() const; - scoped_refptr<UploadData> upload_data_; - // This buffer is filled with data to be uploaded. The data to be sent is - // always at the front of the buffer. If we cannot send all of the buffer at - // once, then we memmove the remaining portion and back-fill the buffer for - // the next "write" call. buf_len_ indicates how much data is in the buffer. - scoped_refptr<IOBuffer> buf_; - size_t buf_len_; - // Index of the current upload element (i.e. the element currently being // read). The index is used as a cursor to iterate over elements in // |upload_data_|. 
@@ -120,17 +95,12 @@ class NET_EXPORT UploadDataStream { uint64 total_size_; uint64 current_position_; - // Whether there is no data left to read. - bool eof_; - // True if the initialization was successful. bool initialized_successfully_; // TODO(satish): Remove this once we have a better way to unit test POST // requests with chunked uploads. static bool merge_chunks_; - // The size of the stream's buffer pointed by buf_. - static const size_t kBufferSize; DISALLOW_COPY_AND_ASSIGN(UploadDataStream); }; diff --git a/net/base/upload_data_stream_unittest.cc b/net/base/upload_data_stream_unittest.cc index ebf9f47..fc1ba69 100644 --- a/net/base/upload_data_stream_unittest.cc +++ b/net/base/upload_data_stream_unittest.cc @@ -11,6 +11,7 @@ #include "base/file_util.h" #include "base/memory/scoped_ptr.h" #include "base/time.h" +#include "net/base/io_buffer.h" #include "net/base/net_errors.h" #include "net/base/upload_data.h" #include "testing/gtest/include/gtest/gtest.h" @@ -22,6 +23,7 @@ namespace { const char kTestData[] = "0123456789"; const size_t kTestDataSize = arraysize(kTestData) - 1; +const size_t kTestBufferSize = 1 << 14; // 16KB. } // namespace @@ -43,7 +45,7 @@ TEST_F(UploadDataStreamTest, EmptyUploadData) { ASSERT_TRUE(stream.get()); EXPECT_EQ(0U, stream->size()); EXPECT_EQ(0U, stream->position()); - EXPECT_TRUE(stream->eof()); + EXPECT_TRUE(stream->IsEOF()); } TEST_F(UploadDataStreamTest, ConsumeAll) { @@ -53,11 +55,14 @@ TEST_F(UploadDataStreamTest, ConsumeAll) { ASSERT_TRUE(stream.get()); EXPECT_EQ(kTestDataSize, stream->size()); EXPECT_EQ(0U, stream->position()); - EXPECT_FALSE(stream->eof()); - while (!stream->eof()) { - stream->MarkConsumedAndFillBuffer(stream->buf_len()); + EXPECT_FALSE(stream->IsEOF()); + scoped_refptr<IOBuffer> buf = new IOBuffer(kTestBufferSize); + while (!stream->IsEOF()) { + int bytes_read = stream->Read(buf, kTestBufferSize); + ASSERT_LE(0, bytes_read); // Not an error. } EXPECT_EQ(kTestDataSize, stream->position()); + ASSERT_TRUE(stream->IsEOF()); } TEST_F(UploadDataStreamTest, FileSmallerThanLength) { @@ -80,11 +85,13 @@ TEST_F(UploadDataStreamTest, FileSmallerThanLength) { ASSERT_TRUE(stream.get()); EXPECT_EQ(kFakeSize, stream->size()); EXPECT_EQ(0U, stream->position()); - EXPECT_FALSE(stream->eof()); + EXPECT_FALSE(stream->IsEOF()); uint64 read_counter = 0; - while (!stream->eof()) { - read_counter += stream->buf_len(); - stream->MarkConsumedAndFillBuffer(stream->buf_len()); + scoped_refptr<IOBuffer> buf = new IOBuffer(kTestBufferSize); + while (!stream->IsEOF()) { + int bytes_read = stream->Read(buf, kTestBufferSize); + ASSERT_LE(0, bytes_read); // Not an error. 
+ read_counter += bytes_read; EXPECT_EQ(read_counter, stream->position()); } // UpdateDataStream will pad out the file with 0 bytes so that the HTTP diff --git a/net/http/http_stream_parser.cc b/net/http/http_stream_parser.cc index 72f4ac9..6b81a66 100644 --- a/net/http/http_stream_parser.cc +++ b/net/http/http_stream_parser.cc @@ -21,7 +21,8 @@ namespace { -static const size_t kMaxMergedHeaderAndBodySize = 1400; +const size_t kMaxMergedHeaderAndBodySize = 1400; +const size_t kRequestBodyBufferSize = 1 << 14; // 16KB std::string GetResponseHeaderLines(const net::HttpResponseHeaders& headers) { std::string raw_headers = headers.raw_headers(); @@ -181,7 +182,6 @@ HttpStreamParser::HttpStreamParser(ClientSocketHandle* connection, io_callback_( base::Bind(&HttpStreamParser::OnIOComplete, base::Unretained(this)))), - chunk_length_without_encoding_(0), sent_last_chunk_(false) { } @@ -220,12 +220,15 @@ int HttpStreamParser::SendRequest(const std::string& request_line, std::string request = request_line + headers.ToString(); request_body_.reset(request_body); - if (request_body_ != NULL && request_body_->is_chunked()) { - request_body_->set_chunk_callback(this); - // The chunk buffer is guaranteed to be large enough to hold the encoded - // chunk. - chunk_buf_ = new SeekableIOBuffer(UploadDataStream::GetBufferSize() + - kChunkHeaderFooterSize); + if (request_body_ != NULL) { + request_body_buf_ = new SeekableIOBuffer(kRequestBodyBufferSize); + if (request_body_->is_chunked()) { + request_body_->set_chunk_callback(this); + // The chunk buffer is adjusted to guarantee that |request_body_buf_| + // is large enough to hold the encoded chunk. + chunk_buf_ = new IOBufferWithSize(kRequestBodyBufferSize - + kChunkHeaderFooterSize); + } } io_state_ = STATE_SENDING_HEADERS; @@ -242,19 +245,19 @@ int HttpStreamParser::SendRequest(const std::string& request_line, request_headers_ = new DrainableIOBuffer( merged_request_headers_and_body, merged_size); - char *buf = request_headers_->data(); - memcpy(buf, request.data(), request.size()); - buf += request.size(); + memcpy(request_headers_->data(), request.data(), request.size()); + request_headers_->DidConsume(request.size()); size_t todo = request_body_->size(); while (todo) { - size_t buf_len = request_body_->buf_len(); - memcpy(buf, request_body_->buf()->data(), buf_len); - todo -= buf_len; - buf += buf_len; - request_body_->MarkConsumedAndFillBuffer(buf_len); + int consumed = request_body_->Read(request_headers_, todo); + DCHECK_GT(consumed, 0); // Read() won't fail if not chunked. + request_headers_->DidConsume(consumed); + todo -= consumed; } - DCHECK(request_body_->eof()); + DCHECK(request_body_->IsEOF()); + // Reset the offset, so the buffer can be read from the beginning. + request_headers_->SetOffset(0); did_merge = true; } @@ -428,8 +431,8 @@ int HttpStreamParser::DoSendHeaders(int result) { io_state_ = STATE_SENDING_CHUNKED_BODY; result = OK; } else if (request_body_ != NULL && request_body_->size() > 0 && - // !eof() indicates that the body wasn't merged. - !request_body_->eof()) { + // !IsEOF() indicates that the body wasn't merged. + !request_body_->IsEOF()) { io_state_ = STATE_SENDING_NON_CHUNKED_BODY; result = OK; } else { @@ -442,11 +445,11 @@ int HttpStreamParser::DoSendChunkedBody(int result) { // |result| is the number of bytes sent from the last call to // DoSendChunkedBody(), or 0 (i.e. OK) the first time. - // Send the remaining data in the chunk buffer. 
- chunk_buf_->DidConsume(result); - if (chunk_buf_->BytesRemaining() > 0) { - return connection_->socket()->Write(chunk_buf_, - chunk_buf_->BytesRemaining(), + // Send the remaining data in the request body buffer. + request_body_buf_->DidConsume(result); + if (request_body_buf_->BytesRemaining() > 0) { + return connection_->socket()->Write(request_body_buf_, + request_body_buf_->BytesRemaining(), io_callback_); } @@ -455,32 +458,33 @@ int HttpStreamParser::DoSendChunkedBody(int result) { return OK; } - // |chunk_length_without_encoding_| is 0 when DoSendBody() is first - // called, hence the first call to MarkConsumedAndFillBuffer() is a noop. - request_body_->MarkConsumedAndFillBuffer(chunk_length_without_encoding_); - chunk_length_without_encoding_ = 0; - - if (request_body_->eof()) { - chunk_buf_->Clear(); - const int chunk_length = EncodeChunk( - base::StringPiece(), chunk_buf_->data(), chunk_buf_->capacity()); - chunk_buf_->DidAppend(chunk_length); + const int consumed = request_body_->Read(chunk_buf_, chunk_buf_->size()); + if (consumed == 0) { // Reached the end. + DCHECK(request_body_->IsEOF()); + request_body_buf_->Clear(); + const int chunk_length = EncodeChunk(base::StringPiece(), + request_body_buf_->data(), + request_body_buf_->capacity()); + request_body_buf_->DidAppend(chunk_length); sent_last_chunk_ = true; - } else if (request_body_->buf_len() > 0) { + } else if (consumed > 0) { // Encode and send the buffer as 1 chunk. - const base::StringPiece payload(request_body_->buf()->data(), - request_body_->buf_len()); - chunk_buf_->Clear(); - const int chunk_length = EncodeChunk( - payload, chunk_buf_->data(), chunk_buf_->capacity()); - chunk_buf_->DidAppend(chunk_length); - chunk_length_without_encoding_ = payload.size(); - } else { - // Nothing to send. More POST data is yet to come? + const base::StringPiece payload(chunk_buf_->data(), consumed); + request_body_buf_->Clear(); + const int chunk_length = EncodeChunk(payload, + request_body_buf_->data(), + request_body_buf_->capacity()); + request_body_buf_->DidAppend(chunk_length); + } else if (consumed == ERR_IO_PENDING) { + // Nothing to send. More POST data is yet to come. return ERR_IO_PENDING; + } else { + // There won't be other errors. + NOTREACHED(); } - return connection_->socket()->Write(chunk_buf_, chunk_buf_->BytesRemaining(), + return connection_->socket()->Write(request_body_buf_, + request_body_buf_->BytesRemaining(), io_callback_); } @@ -488,15 +492,27 @@ int HttpStreamParser::DoSendNonChunkedBody(int result) { // |result| is the number of bytes sent from the last call to // DoSendNonChunkedBody(), or 0 (i.e. OK) the first time. - // The first call to MarkConsumedAndFillBuffer() is a noop as |result| is 0. - request_body_->MarkConsumedAndFillBuffer(result); + // Send the remaining data in the request body buffer. + request_body_buf_->DidConsume(result); + if (request_body_buf_->BytesRemaining() > 0) { + return connection_->socket()->Write(request_body_buf_, + request_body_buf_->BytesRemaining(), + io_callback_); + } - if (!request_body_->eof()) { - int buf_len = static_cast<int>(request_body_->buf_len()); - result = connection_->socket()->Write(request_body_->buf(), buf_len, + request_body_buf_->Clear(); + const int consumed = request_body_->Read(request_body_buf_, + request_body_buf_->capacity()); + if (consumed == 0) { // Reached the end. 
+ io_state_ = STATE_REQUEST_SENT; + } else if (consumed > 0) { + request_body_buf_->DidAppend(consumed); + result = connection_->socket()->Write(request_body_buf_, + request_body_buf_->BytesRemaining(), io_callback_); } else { - io_state_ = STATE_REQUEST_SENT; + // UploadDataStream::Read() won't fail if not chunked. + NOTREACHED(); } return result; } diff --git a/net/http/http_stream_parser.h b/net/http/http_stream_parser.h index 5740bb3..3d63123 100644 --- a/net/http/http_stream_parser.h +++ b/net/http/http_stream_parser.h @@ -225,7 +225,9 @@ class NET_EXPORT_PRIVATE HttpStreamParser : public ChunkCallback { // Stores an encoded chunk for chunked uploads. // Note: This should perhaps be improved to not create copies of the data. - scoped_refptr<SeekableIOBuffer> chunk_buf_; + scoped_refptr<IOBufferWithSize> chunk_buf_; + // Temporary buffer to read the request body from UploadDataStream. + scoped_refptr<SeekableIOBuffer> request_body_buf_; size_t chunk_length_without_encoding_; bool sent_last_chunk_; diff --git a/net/spdy/spdy_http_stream.cc b/net/spdy/spdy_http_stream.cc index ff96716..bf6dfce 100644 --- a/net/spdy/spdy_http_stream.cc +++ b/net/spdy/spdy_http_stream.cc @@ -211,10 +211,16 @@ int SpdyHttpStream::SendRequest(const HttpRequestHeaders& request_headers, CHECK(!request_body_stream_.get()); if (request_body) { - if (request_body->size() || request_body->is_chunked()) + if (request_body->size() || request_body->is_chunked()) { request_body_stream_.reset(request_body); - else + // Use kMaxSpdyFrameChunkSize as the buffer size, since the request + // body data is written with this size at a time. + raw_request_body_buf_ = new IOBufferWithSize(kMaxSpdyFrameChunkSize); + // The request body buffer is empty at first. + request_body_buf_ = new DrainableIOBuffer(raw_request_body_buf_, 0); + } else { delete request_body; + } } CHECK(!callback.is_null()); @@ -276,31 +282,50 @@ bool SpdyHttpStream::OnSendHeadersComplete(int status) { int SpdyHttpStream::OnSendBody() { CHECK(request_body_stream_.get()); - int buf_len = static_cast<int>(request_body_stream_->buf_len()); - if (!buf_len) + // TODO(satorux): Clean up the logic here. This behavior is weird. Reading + // of upload data should happen in OnSendBody(). crbug.com/113107. + // + // Nothing to send. This happens when OnSendBody() is first called. + // A read of the upload data stream is initiated in OnSendBodyComplete(). + if (request_body_buf_->BytesRemaining() == 0) return OK; - bool is_chunked = request_body_stream_->is_chunked(); - // TODO(satish): For non-chunked POST data, we set DATA_FLAG_FIN for all - // blocks of data written out. This is wrong if the POST data was larger than - // UploadDataStream::kBufSize as that is the largest buffer that - // UploadDataStream returns at a time and we'll be setting the FIN flag for - // each block of data written out. - bool eof = !is_chunked || request_body_stream_->IsOnLastChunk(); + + const bool eof = request_body_stream_->IsEOF(); return stream_->WriteStreamData( - request_body_stream_->buf(), buf_len, + request_body_buf_, + request_body_buf_->BytesRemaining(), eof ? spdy::DATA_FLAG_FIN : spdy::DATA_FLAG_NONE); } int SpdyHttpStream::OnSendBodyComplete(int status, bool* eof) { + // |status| is the number of bytes written to the SPDY stream. CHECK(request_body_stream_.get()); + *eof = false; + + if (status > 0) { + request_body_buf_->DidConsume(status); + if (request_body_buf_->BytesRemaining()) { + // Go back to OnSendBody() to send the remaining data. 
+ return OK; + } + } + + // Check if the entire body data has been sent. + *eof = (request_body_stream_->IsEOF() && + !request_body_buf_->BytesRemaining()); + if (*eof) + return OK; - request_body_stream_->MarkConsumedAndFillBuffer(status); - *eof = request_body_stream_->eof(); - if (!*eof && - request_body_stream_->is_chunked() && - !request_body_stream_->buf_len()) + // Read the data from the request body stream. + const int bytes_read = request_body_stream_->Read( + raw_request_body_buf_, raw_request_body_buf_->size()); + if (request_body_stream_->is_chunked() && bytes_read == ERR_IO_PENDING) return ERR_IO_PENDING; + // ERR_IO_PENDING with chunked encoding is the only possible error. + DCHECK_GE(bytes_read, 0); + request_body_buf_ = new DrainableIOBuffer(raw_request_body_buf_, + bytes_read); return OK; } diff --git a/net/spdy/spdy_http_stream.h b/net/spdy/spdy_http_stream.h index f8612af..2c087ed 100644 --- a/net/spdy/spdy_http_stream.h +++ b/net/spdy/spdy_http_stream.h @@ -1,4 +1,4 @@ -// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -22,6 +22,7 @@ namespace net { +class DrainableIOBuffer; class HttpResponseInfo; class IOBuffer; class SpdySession; @@ -125,6 +126,11 @@ class NET_EXPORT_PRIVATE SpdyHttpStream : public SpdyStream::Delegate, scoped_refptr<IOBuffer> user_buffer_; int user_buffer_len_; + // Temporary buffer used to read the request body from UploadDataStream. + scoped_refptr<IOBufferWithSize> raw_request_body_buf_; + // Wraps raw_request_body_buf_ to read the remaining data progressively. + scoped_refptr<DrainableIOBuffer> request_body_buf_; + // Is there a scheduled read callback pending. bool buffered_read_callback_pending_; // Has more data been received from the network during the wait for the diff --git a/net/spdy/spdy_network_transaction_unittest.cc b/net/spdy/spdy_network_transaction_unittest.cc index ea4c397..47b4f6a 100644 --- a/net/spdy/spdy_network_transaction_unittest.cc +++ b/net/spdy/spdy_network_transaction_unittest.cc @@ -2226,7 +2226,12 @@ TEST_P(SpdyNetworkTransactionTest, FlowControlStallResume) { ASSERT_TRUE(stream != NULL); ASSERT_TRUE(stream->stream() != NULL); EXPECT_EQ(0, stream->stream()->send_window_size()); - EXPECT_FALSE(stream->request_body_stream_->eof()); + // All the body data should have been read. + // TODO(satorux): This is because of the weirdness in reading the request + // body in OnSendBodyComplete(). See crbug.com/113107. + EXPECT_TRUE(stream->request_body_stream_->IsEOF()); + // But the body is not yet fully sent ("hello!" is not yet sent). + EXPECT_FALSE(stream->stream()->body_sent()); data->ForceNextRead(); // Read in WINDOW_UPDATE frame. rv = callback.WaitForResult(); diff --git a/net/spdy/spdy_stream.h b/net/spdy/spdy_stream.h index 432e3e51..0512dce 100644 --- a/net/spdy/spdy_stream.h +++ b/net/spdy/spdy_stream.h @@ -208,6 +208,9 @@ class NET_EXPORT_PRIVATE SpdyStream void Close(); bool cancelled() const { return cancelled_; } bool closed() const { return io_state_ == STATE_DONE; } + // TODO(satorux): This is only for testing. We should be able to remove + // this once crbug.com/113107 is addressed. + bool body_sent() const { return io_state_ > STATE_SEND_BODY_COMPLETE; } // Interface for Spdy[Http|WebSocket]Stream to use. |