path: root/net/base
author    satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-02-08 06:07:00 +0000
committer satorux@chromium.org <satorux@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-02-08 06:07:00 +0000
commit    0ab2e24c5fd9b65352e54456954f6a9406c2f8df (patch)
tree      245bc5d1be6c3119e8e00c804cbbe5cb9d0c11c3 /net/base
parent    631a595935c53d14158fd5ec2e9348a249a34957 (diff)
net: Rework UploadDataStream API by introducing Read().
Replace buf() + buf_len() + MarkConsumedAndFillBuffer() + GetBufferSize() with a single function, Read(). This is done by externalizing the read buffer.

As a byproduct, the use of memmove() in handling SPDY HTTP requests is significantly reduced. There, writes are done one kMaxSpdyFrameChunkSize chunk (about 2.8KB) at a time, which caused MarkConsumedAndFillBuffer() to memmove() the remaining ~13.2KB of data in the internal 16KB buffer to the beginning every time a 2.8KB chunk was written.

Along the way, UploadDataStream::IsOnLastChunk() is removed, as it is no longer necessary. Some TODO(satish)s have also been addressed.

This is in preparation for adding an asynchronous API to UploadDataStream, which is why Read() takes IOBuffer* rather than char*.

BUG=72001
TEST=try bots

Review URL: https://chromiumcodereview.appspot.com/9317055

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@120946 0039d316-1c4b-4281-b951-d872f2087c98
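For context, a minimal sketch (not part of this CL) of how a caller might drain a stream through the reworked interface, using its own IOBuffer instead of the old stream-owned buf(). DrainUploadDataStream and kReadSize are illustrative names only, not symbols introduced by this patch:

  #include "net/base/io_buffer.h"
  #include "net/base/net_errors.h"
  #include "net/base/upload_data_stream.h"

  namespace net {

  // Illustrative helper: reads the whole stream into a caller-owned IOBuffer,
  // |kReadSize| bytes at a time. |stream| is assumed to have been Init()ed
  // successfully before this is called.
  int DrainUploadDataStream(UploadDataStream* stream) {
    const int kReadSize = 16 * 1024;
    scoped_refptr<IOBuffer> buf(new IOBuffer(kReadSize));
    while (!stream->IsEOF()) {
      int result = stream->Read(buf, kReadSize);
      if (result == ERR_IO_PENDING)
        return ERR_IO_PENDING;  // Chunked upload: nothing to read yet, more data later.
      if (result < 0)
        return result;  // Non-chunked reads are not expected to fail.
      // |result| bytes starting at buf->data() are now ready to be sent.
    }
    return OK;
  }

  }  // namespace net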
Diffstat (limited to 'net/base')
-rw-r--r--  net/base/upload_data_stream.cc           117
-rw-r--r--  net/base/upload_data_stream.h             62
-rw-r--r--  net/base/upload_data_stream_unittest.cc   23
3 files changed, 77 insertions, 125 deletions
diff --git a/net/base/upload_data_stream.cc b/net/base/upload_data_stream.cc
index 550a117..937ee09 100644
--- a/net/base/upload_data_stream.cc
+++ b/net/base/upload_data_stream.cc
@@ -13,9 +13,18 @@
namespace net {
-const size_t UploadDataStream::kBufferSize = 16384;
bool UploadDataStream::merge_chunks_ = true;
+UploadDataStream::UploadDataStream(UploadData* upload_data)
+ : upload_data_(upload_data),
+ element_index_(0),
+ element_offset_(0),
+ element_file_bytes_remaining_(0),
+ total_size_(0),
+ current_position_(0),
+ initialized_successfully_(false) {
+}
+
UploadDataStream::~UploadDataStream() {
}
@@ -23,56 +32,43 @@ int UploadDataStream::Init() {
DCHECK(!initialized_successfully_);
total_size_ = upload_data_->GetContentLength();
- const int result = FillBuffer();
- initialized_successfully_ = (result == OK);
- return result;
-}
-// static
-size_t UploadDataStream::GetBufferSize() {
- return kBufferSize;
-}
-
-void UploadDataStream::MarkConsumedAndFillBuffer(size_t num_bytes) {
- DCHECK(initialized_successfully_);
- DCHECK_LE(num_bytes, buf_len_);
- DCHECK(!eof_);
-
- if (num_bytes) {
- buf_len_ -= num_bytes;
- // Move the remaining data to the beginning.
- if (buf_len_ > 0)
- memmove(buf_->data(), buf_->data() + num_bytes, buf_len_);
+ // If the underlying file has been changed and the expected file
+ // modification time is set, treat it as error. Note that the expected
+ // modification time from WebKit is based on time_t precision. So we
+ // have to convert both to time_t to compare. This check is used for
+ // sliced files.
+ const std::vector<UploadData::Element>& elements = *upload_data_->elements();
+ for (size_t i = 0; i < elements.size(); ++i) {
+ const UploadData::Element& element = elements[i];
+ if (element.type() == UploadData::TYPE_FILE &&
+ !element.expected_file_modification_time().is_null()) {
+ // Temporarily allow until fix: http://crbug.com/72001.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+ base::PlatformFileInfo info;
+ if (file_util::GetFileInfo(element.file_path(), &info) &&
+ element.expected_file_modification_time().ToTimeT() !=
+ info.last_modified.ToTimeT()) {
+ return ERR_UPLOAD_FILE_CHANGED;
+ }
+ }
}
- FillBuffer();
-
- current_position_ += num_bytes;
-}
-
-UploadDataStream::UploadDataStream(UploadData* upload_data)
- : upload_data_(upload_data),
- buf_(new IOBuffer(kBufferSize)),
- buf_len_(0),
- element_index_(0),
- element_offset_(0),
- element_file_bytes_remaining_(0),
- total_size_(0),
- current_position_(0),
- eof_(false),
- initialized_successfully_(false) {
+ initialized_successfully_ = true;
+ return OK;
}
-int UploadDataStream::FillBuffer() {
+int UploadDataStream::Read(IOBuffer* buf, int buf_len) {
std::vector<UploadData::Element>& elements = *upload_data_->elements();
- while (buf_len_ < kBufferSize && element_index_ < elements.size()) {
+ int bytes_copied = 0;
+ while (bytes_copied < buf_len && element_index_ < elements.size()) {
bool advance_to_next_element = false;
// This is not const as GetContentLength() is not const.
UploadData::Element& element = elements[element_index_];
- const size_t free_buffer_space = kBufferSize - buf_len_;
+ const size_t free_buffer_space = buf_len - bytes_copied;
if (element.type() == UploadData::TYPE_BYTES ||
element.type() == UploadData::TYPE_CHUNK) {
const std::vector<char>& element_data = element.bytes();
@@ -86,10 +82,9 @@ int UploadDataStream::FillBuffer() {
// the address of an element in |element_data| and that will throw an
// exception if |element_data| is an empty vector.
if (num_bytes_to_copy > 0) {
- memcpy(buf_->data() + buf_len_,
- &element_data[element_offset_],
+ memcpy(buf->data() + bytes_copied, &element_data[element_offset_],
num_bytes_to_copy);
- buf_len_ += num_bytes_to_copy;
+ bytes_copied += num_bytes_to_copy;
element_offset_ += num_bytes_to_copy;
}
@@ -102,17 +97,6 @@ int UploadDataStream::FillBuffer() {
// Open the file of the current element if not yet opened.
if (!element_file_stream_.get()) {
- // If the underlying file has been changed, treat it as error.
- // Note that the expected modification time from WebKit is based on
- // time_t precision. So we have to convert both to time_t to compare.
- if (!element.expected_file_modification_time().is_null()) {
- base::PlatformFileInfo info;
- if (file_util::GetFileInfo(element.file_path(), &info) &&
- element.expected_file_modification_time().ToTimeT() !=
- info.last_modified.ToTimeT()) {
- return ERR_UPLOAD_FILE_CHANGED;
- }
- }
element_file_bytes_remaining_ = element.GetContentLength();
element_file_stream_.reset(element.NewFileStreamForReading());
}
@@ -128,7 +112,7 @@ int UploadDataStream::FillBuffer() {
// missing or not readable.
if (element_file_stream_.get()) {
num_bytes_consumed =
- element_file_stream_->Read(buf_->data() + buf_len_,
+ element_file_stream_->Read(buf->data() + bytes_copied,
num_bytes_to_read,
CompletionCallback());
}
@@ -136,10 +120,10 @@ int UploadDataStream::FillBuffer() {
// If there's less data to read than we initially observed, then
// pad with zero. Otherwise the server will hang waiting for the
// rest of the data.
- memset(buf_->data() + buf_len_, 0, num_bytes_to_read);
+ memset(buf->data() + bytes_copied, 0, num_bytes_to_read);
num_bytes_consumed = num_bytes_to_read;
}
- buf_len_ += num_bytes_consumed;
+ bytes_copied += num_bytes_consumed;
element_file_bytes_remaining_ -= num_bytes_consumed;
}
@@ -156,9 +140,11 @@ int UploadDataStream::FillBuffer() {
break;
}
- eof_ = IsEOF();
+ current_position_ += bytes_copied;
+ if (is_chunked() && !IsEOF() && bytes_copied == 0)
+ return ERR_IO_PENDING;
- return OK;
+ return bytes_copied;
}
void UploadDataStream::AdvanceToNextElement() {
@@ -176,8 +162,8 @@ void UploadDataStream::AdvanceToNextElement() {
bool UploadDataStream::IsEOF() const {
const std::vector<UploadData::Element>& elements = *upload_data_->elements();
- // Check if all elements are consumed and the buffer is empty.
- if (element_index_ == elements.size() && !buf_len_) {
+ // Check if all elements are consumed.
+ if (element_index_ == elements.size()) {
// If the upload data is chunked, check if the last element is the
// last chunk.
if (!upload_data_->is_chunked() ||
@@ -188,17 +174,6 @@ bool UploadDataStream::IsEOF() const {
return false;
}
-bool UploadDataStream::IsOnLastChunk() const {
- DCHECK(initialized_successfully_);
-
- const std::vector<UploadData::Element>& elements = *upload_data_->elements();
- DCHECK(upload_data_->is_chunked());
- return (eof_ ||
- (!elements.empty() &&
- element_index_ == elements.size() &&
- elements.back().is_last_chunk()));
-}
-
bool UploadDataStream::IsInMemory() const {
DCHECK(initialized_successfully_);
diff --git a/net/base/upload_data_stream.h b/net/base/upload_data_stream.h
index 47c188c..ad70b09 100644
--- a/net/base/upload_data_stream.h
+++ b/net/base/upload_data_stream.h
@@ -29,23 +29,20 @@ class NET_EXPORT UploadDataStream {
// files) and the target file is changed.
int Init();
- // Returns the stream's buffer.
- IOBuffer* buf() const { return buf_; }
- // Returns the length of the data in the stream's buffer.
- size_t buf_len() const { return buf_len_; }
-
- // TODO(satish): We should ideally have UploadDataStream expose a Read()
- // method which returns data in a caller provided IOBuffer. That would do away
- // with this function and make the interface cleaner as well with less memmove
- // calls.
+ // Reads up to |buf_len| bytes from the upload data stream to |buf|. The
+ // number of bytes read is returned. Partial reads are allowed. Zero is
+ // returned on a call to Read when there are no remaining bytes in the
+ // stream, and IsEOF() will return true hereafter.
//
- // Returns the size of the stream's buffer pointed by buf().
- static size_t GetBufferSize();
-
- // Call to indicate that a portion of the stream's buffer was consumed. This
- // call modifies the stream's buffer so that it contains the next segment of
- // the upload data to be consumed.
- void MarkConsumedAndFillBuffer(size_t num_bytes);
+ // If there's less data to read than we initially observed (i.e. the actual
+ // upload data is smaller than size()), zeros are padded to ensure that
+ // size() bytes can be read, which can happen for TYPE_FILE payloads.
+ //
+ // If the upload data stream is chunked (i.e. is_chunked() is true),
+ // ERR_IO_PENDING is returned to indicate there is nothing to read at the
+ // moment, but more data to come at a later time. If not chunked, reads
+ // won't fail.
+ int Read(IOBuffer* buf, int buf_len);
// Sets the callback to be invoked when new chunks are available to upload.
void set_chunk_callback(ChunkCallback* callback) {
@@ -61,15 +58,9 @@ class NET_EXPORT UploadDataStream {
bool is_chunked() const { return upload_data_->is_chunked(); }
- // Returns whether there is no more data to read, regardless of whether
- // position < size.
- bool eof() const { return eof_; }
-
- // Returns whether the data available in buf() includes the last chunk in a
- // chunked data stream. This method returns true once the final chunk has been
- // placed in the IOBuffer returned by buf(), in contrast to eof() which
- // returns true only after the data in buf() has been consumed.
- bool IsOnLastChunk() const;
+ // Returns true if all data has been consumed from this upload data
+ // stream.
+ bool IsEOF() const;
// Returns true if the upload data in the stream is entirely in memory.
bool IsInMemory() const;
@@ -78,27 +69,11 @@ class NET_EXPORT UploadDataStream {
static void set_merge_chunks(bool merge) { merge_chunks_ = merge; }
private:
- // Fills the buffer with any remaining data and sets eof_ if there was nothing
- // left to fill the buffer with.
- // Returns OK if the operation succeeds. Otherwise error code is returned.
- int FillBuffer();
-
// Advances to the next element. Updates the internal states.
void AdvanceToNextElement();
- // Returns true if all data has been consumed from this upload data
- // stream.
- bool IsEOF() const;
-
scoped_refptr<UploadData> upload_data_;
- // This buffer is filled with data to be uploaded. The data to be sent is
- // always at the front of the buffer. If we cannot send all of the buffer at
- // once, then we memmove the remaining portion and back-fill the buffer for
- // the next "write" call. buf_len_ indicates how much data is in the buffer.
- scoped_refptr<IOBuffer> buf_;
- size_t buf_len_;
-
// Index of the current upload element (i.e. the element currently being
// read). The index is used as a cursor to iterate over elements in
// |upload_data_|.
@@ -120,17 +95,12 @@ class NET_EXPORT UploadDataStream {
uint64 total_size_;
uint64 current_position_;
- // Whether there is no data left to read.
- bool eof_;
-
// True if the initialization was successful.
bool initialized_successfully_;
// TODO(satish): Remove this once we have a better way to unit test POST
// requests with chunked uploads.
static bool merge_chunks_;
- // The size of the stream's buffer pointed by buf_.
- static const size_t kBufferSize;
DISALLOW_COPY_AND_ASSIGN(UploadDataStream);
};
diff --git a/net/base/upload_data_stream_unittest.cc b/net/base/upload_data_stream_unittest.cc
index ebf9f47..fc1ba69 100644
--- a/net/base/upload_data_stream_unittest.cc
+++ b/net/base/upload_data_stream_unittest.cc
@@ -11,6 +11,7 @@
#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/time.h"
+#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/base/upload_data.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,6 +23,7 @@ namespace {
const char kTestData[] = "0123456789";
const size_t kTestDataSize = arraysize(kTestData) - 1;
+const size_t kTestBufferSize = 1 << 14; // 16KB.
} // namespace
@@ -43,7 +45,7 @@ TEST_F(UploadDataStreamTest, EmptyUploadData) {
ASSERT_TRUE(stream.get());
EXPECT_EQ(0U, stream->size());
EXPECT_EQ(0U, stream->position());
- EXPECT_TRUE(stream->eof());
+ EXPECT_TRUE(stream->IsEOF());
}
TEST_F(UploadDataStreamTest, ConsumeAll) {
@@ -53,11 +55,14 @@ TEST_F(UploadDataStreamTest, ConsumeAll) {
ASSERT_TRUE(stream.get());
EXPECT_EQ(kTestDataSize, stream->size());
EXPECT_EQ(0U, stream->position());
- EXPECT_FALSE(stream->eof());
- while (!stream->eof()) {
- stream->MarkConsumedAndFillBuffer(stream->buf_len());
+ EXPECT_FALSE(stream->IsEOF());
+ scoped_refptr<IOBuffer> buf = new IOBuffer(kTestBufferSize);
+ while (!stream->IsEOF()) {
+ int bytes_read = stream->Read(buf, kTestBufferSize);
+ ASSERT_LE(0, bytes_read); // Not an error.
}
EXPECT_EQ(kTestDataSize, stream->position());
+ ASSERT_TRUE(stream->IsEOF());
}
TEST_F(UploadDataStreamTest, FileSmallerThanLength) {
@@ -80,11 +85,13 @@ TEST_F(UploadDataStreamTest, FileSmallerThanLength) {
ASSERT_TRUE(stream.get());
EXPECT_EQ(kFakeSize, stream->size());
EXPECT_EQ(0U, stream->position());
- EXPECT_FALSE(stream->eof());
+ EXPECT_FALSE(stream->IsEOF());
uint64 read_counter = 0;
- while (!stream->eof()) {
- read_counter += stream->buf_len();
- stream->MarkConsumedAndFillBuffer(stream->buf_len());
+ scoped_refptr<IOBuffer> buf = new IOBuffer(kTestBufferSize);
+ while (!stream->IsEOF()) {
+ int bytes_read = stream->Read(buf, kTestBufferSize);
+ ASSERT_LE(0, bytes_read); // Not an error.
+ read_counter += bytes_read;
EXPECT_EQ(read_counter, stream->position());
}
// UploadDataStream will pad out the file with 0 bytes so that the HTTP