author     erg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-21 23:46:49 +0000
committer  erg@google.com <erg@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-21 23:46:49 +0000
commit     7aefb155470efae35c224f1ebb21f5ff893179f5 (patch)
tree       6333871e5b8fdcfcafadefc95457e480c23fb8ad
parent     9c678e93d80ab8ed8f52043e1096e2d08f97980e (diff)
More net/ header/implementation method reordering.
(Contains some minor de-inlining.)

BUG=68682
TEST=compiles

Review URL: http://codereview.chromium.org/6263010

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72232 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  net/disk_cache/bitmap.cc                    63
-rw-r--r--  net/disk_cache/bitmap.h                     26
-rw-r--r--  net/disk_cache/block_files.cc              170
-rw-r--r--  net/disk_cache/disk_cache_test_base.cc     193
-rw-r--r--  net/disk_cache/disk_cache_test_base.h       12
-rw-r--r--  net/disk_cache/mem_entry_impl.cc           160
-rw-r--r--  net/disk_cache/mem_entry_impl.h             44
-rw-r--r--  net/disk_cache/stats_histogram.cc           12
-rw-r--r--  net/proxy/proxy_resolver_v8.h                4
-rw-r--r--  net/proxy/proxy_script_fetcher_impl.cc      48
-rw-r--r--  net/proxy/proxy_script_fetcher_impl.h       15
-rw-r--r--  net/socket_stream/socket_stream.cc          93
-rw-r--r--  net/socket_stream/socket_stream_metrics.h   27
-rw-r--r--  net/spdy/spdy_framer.cc                    924
14 files changed, 913 insertions(+), 878 deletions(-)
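The pattern applied across these files is a style cleanup: method definitions in each .cc are reordered to match the declaration order in the corresponding header, and a few inline definitions (for example the Bitmap constructors and destructor in the first hunks below) move out of the header into the .cc. A minimal sketch of that de-inlining follows; Counter is a hypothetical class used only for illustration and is not part of this commit.

// counter.h -- after de-inlining, the header carries declarations only,
// in the order the definitions will appear in the .cc file.
class Counter {
 public:
  explicit Counter(int start);
  ~Counter();

  void Increment();
  int value() const;

 private:
  int value_;
};

// counter.cc -- definitions appear in the same order as the declarations
// above, which is the ordering this commit enforces for the net/ files.
Counter::Counter(int start) : value_(start) {}

Counter::~Counter() {}

void Counter::Increment() {
  ++value_;
}

int Counter::value() const {
  return value_;
}

Keeping bodies in the .cc means a change to an implementation detail only recompiles that translation unit, and the .cc can be read side by side with its header.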
diff --git a/net/disk_cache/bitmap.cc b/net/disk_cache/bitmap.cc
index e025090..6c9aceb 100644
--- a/net/disk_cache/bitmap.cc
+++ b/net/disk_cache/bitmap.cc
@@ -4,6 +4,8 @@
#include "net/disk_cache/bitmap.h"
+#include <algorithm>
+
#include "base/logging.h"
namespace {
@@ -38,6 +40,31 @@ int FindLSBNonEmpty(uint32 word, bool value) {
namespace disk_cache {
+Bitmap::Bitmap(int num_bits, bool clear_bits)
+ : num_bits_(num_bits),
+ array_size_(RequiredArraySize(num_bits)),
+ alloc_(true) {
+ map_ = new uint32[array_size_];
+
+ // Initialize all of the bits.
+ if (clear_bits)
+ Clear();
+}
+
+Bitmap::Bitmap(uint32* map, int num_bits, int num_words)
+ : map_(map),
+ num_bits_(num_bits),
+ // If size is larger than necessary, trim because array_size_ is used
+ // as a bound by various methods.
+ array_size_(std::min(RequiredArraySize(num_bits), num_words)),
+ alloc_(false) {
+}
+
+Bitmap::~Bitmap() {
+ if (alloc_)
+ delete [] map_;
+}
+
void Bitmap::Resize(int num_bits, bool clear_bits) {
DCHECK(alloc_ || !map_);
const int old_maxsize = num_bits_;
@@ -105,24 +132,6 @@ void Bitmap::SetMap(const uint32* map, int size) {
memcpy(map_, map, std::min(size, array_size_) * sizeof(*map_));
}
-void Bitmap::SetWordBits(int start, int len, bool value) {
- DCHECK_LT(len, kIntBits);
- DCHECK_GE(len, 0);
- if (!len)
- return;
-
- int word = start / kIntBits;
- int offset = start % kIntBits;
-
- uint32 to_add = 0xffffffff << len;
- to_add = (~to_add) << offset;
- if (value) {
- map_[word] |= to_add;
- } else {
- map_[word] &= ~to_add;
- }
-}
-
void Bitmap::SetRange(int begin, int end, bool value) {
DCHECK_LE(begin, end);
int start_offset = begin & (kIntBits - 1);
@@ -281,4 +290,22 @@ int Bitmap::FindBits(int* index, int limit, bool value) const {
return end - *index;
}
+void Bitmap::SetWordBits(int start, int len, bool value) {
+ DCHECK_LT(len, kIntBits);
+ DCHECK_GE(len, 0);
+ if (!len)
+ return;
+
+ int word = start / kIntBits;
+ int offset = start % kIntBits;
+
+ uint32 to_add = 0xffffffff << len;
+ to_add = (~to_add) << offset;
+ if (value) {
+ map_[word] |= to_add;
+ } else {
+ map_[word] &= ~to_add;
+ }
+}
+
} // namespace disk_cache
diff --git a/net/disk_cache/bitmap.h b/net/disk_cache/bitmap.h
index 8e24e15..8b3324c 100644
--- a/net/disk_cache/bitmap.h
+++ b/net/disk_cache/bitmap.h
@@ -6,8 +6,6 @@
#define NET_DISK_CACHE_BITMAP_H_
#pragma once
-#include <algorithm>
-
#include "base/basictypes.h"
namespace disk_cache {
@@ -19,30 +17,14 @@ class Bitmap {
// This constructor will allocate on a uint32 boundary. If |clear_bits| is
// false, the bitmap bits will not be initialized.
- Bitmap(int num_bits, bool clear_bits)
- : num_bits_(num_bits), array_size_(RequiredArraySize(num_bits)),
- alloc_(true) {
- map_ = new uint32[array_size_];
-
- // Initialize all of the bits.
- if (clear_bits)
- Clear();
- }
+ Bitmap(int num_bits, bool clear_bits);
// Constructs a Bitmap with the actual storage provided by the caller. |map|
// has to be valid until this object destruction. |num_bits| is the number of
// bits in the bitmap, and |num_words| is the size of |map| in 32-bit words.
- Bitmap(uint32* map, int num_bits, int num_words)
- : map_(map), num_bits_(num_bits),
- // If size is larger than necessary, trim because array_size_ is used
- // as a bound by various methods.
- array_size_(std::min(RequiredArraySize(num_bits), num_words)),
- alloc_(false) {}
-
- ~Bitmap() {
- if (alloc_)
- delete[] map_;
- }
+ Bitmap(uint32* map, int num_bits, int num_words);
+
+ ~Bitmap();
// Resizes the bitmap.
// If |num_bits| < Size(), the extra bits will be discarded.
diff --git a/net/disk_cache/block_files.cc b/net/disk_cache/block_files.cc
index 3eb4e35..faa9706 100644
--- a/net/disk_cache/block_files.cc
+++ b/net/disk_cache/block_files.cc
@@ -220,6 +220,91 @@ bool BlockFiles::Init(bool create_files) {
return true;
}
+MappedFile* BlockFiles::GetFile(Addr address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ DCHECK(block_files_.size() >= 4);
+ DCHECK(address.is_block_file() || !address.is_initialized());
+ if (!address.is_initialized())
+ return NULL;
+
+ int file_index = address.FileNumber();
+ if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
+ !block_files_[file_index]) {
+ // We need to open the file
+ if (!OpenBlockFile(file_index))
+ return NULL;
+ }
+ DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
+ return block_files_[file_index];
+}
+
+bool BlockFiles::CreateBlock(FileType block_type, int block_count,
+ Addr* block_address) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (block_type < RANKINGS || block_type > BLOCK_4K ||
+ block_count < 1 || block_count > 4)
+ return false;
+ if (!init_)
+ return false;
+
+ MappedFile* file = FileForNewBlock(block_type, block_count);
+ if (!file)
+ return false;
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+
+ int target_size = 0;
+ for (int i = block_count; i <= 4; i++) {
+ if (header->empty[i - 1]) {
+ target_size = i;
+ break;
+ }
+ }
+
+ DCHECK(target_size);
+ int index;
+ if (!CreateMapBlock(target_size, block_count, header, &index))
+ return false;
+
+ Addr address(block_type, block_count, header->this_file, index);
+ block_address->set_value(address.value());
+ Trace("CreateBlock 0x%x", address.value());
+ return true;
+}
+
+void BlockFiles::DeleteBlock(Addr address, bool deep) {
+ DCHECK(thread_checker_->CalledOnValidThread());
+ if (!address.is_initialized() || address.is_separate_file())
+ return;
+
+ if (!zero_buffer_) {
+ zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
+ memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
+ }
+ MappedFile* file = GetFile(address);
+ if (!file)
+ return;
+
+ Trace("DeleteBlock 0x%x", address.value());
+
+ BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
+ DeleteMapBlock(address.start_block(), address.num_blocks(), header);
+
+ size_t size = address.BlockSize() * address.num_blocks();
+ size_t offset = address.start_block() * address.BlockSize() +
+ kBlockHeaderSize;
+ if (deep)
+ file->Write(zero_buffer_, size, offset);
+
+ if (!header->num_entries) {
+ // This file is now empty. Let's try to delete it.
+ FileType type = Addr::RequiredFileType(header->entry_size);
+ if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
+ type = RANKINGS;
+ RemoveEmptyFile(type);
+ }
+}
+
void BlockFiles::CloseFiles() {
if (init_) {
DCHECK(thread_checker_->CalledOnValidThread());
@@ -346,24 +431,6 @@ bool BlockFiles::OpenBlockFile(int index) {
return true;
}
-MappedFile* BlockFiles::GetFile(Addr address) {
- DCHECK(thread_checker_->CalledOnValidThread());
- DCHECK(block_files_.size() >= 4);
- DCHECK(address.is_block_file() || !address.is_initialized());
- if (!address.is_initialized())
- return NULL;
-
- int file_index = address.FileNumber();
- if (static_cast<unsigned int>(file_index) >= block_files_.size() ||
- !block_files_[file_index]) {
- // We need to open the file
- if (!OpenBlockFile(file_index))
- return NULL;
- }
- DCHECK(block_files_.size() >= static_cast<unsigned int>(file_index));
- return block_files_[file_index];
-}
-
bool BlockFiles::GrowBlockFile(MappedFile* file, BlockFileHeader* header) {
if (kMaxBlocks == header->max_entries)
return false;
@@ -489,73 +556,6 @@ void BlockFiles::RemoveEmptyFile(FileType block_type) {
}
}
-bool BlockFiles::CreateBlock(FileType block_type, int block_count,
- Addr* block_address) {
- DCHECK(thread_checker_->CalledOnValidThread());
- if (block_type < RANKINGS || block_type > BLOCK_4K ||
- block_count < 1 || block_count > 4)
- return false;
- if (!init_)
- return false;
-
- MappedFile* file = FileForNewBlock(block_type, block_count);
- if (!file)
- return false;
-
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
-
- int target_size = 0;
- for (int i = block_count; i <= 4; i++) {
- if (header->empty[i - 1]) {
- target_size = i;
- break;
- }
- }
-
- DCHECK(target_size);
- int index;
- if (!CreateMapBlock(target_size, block_count, header, &index))
- return false;
-
- Addr address(block_type, block_count, header->this_file, index);
- block_address->set_value(address.value());
- Trace("CreateBlock 0x%x", address.value());
- return true;
-}
-
-void BlockFiles::DeleteBlock(Addr address, bool deep) {
- DCHECK(thread_checker_->CalledOnValidThread());
- if (!address.is_initialized() || address.is_separate_file())
- return;
-
- if (!zero_buffer_) {
- zero_buffer_ = new char[Addr::BlockSizeForFileType(BLOCK_4K) * 4];
- memset(zero_buffer_, 0, Addr::BlockSizeForFileType(BLOCK_4K) * 4);
- }
- MappedFile* file = GetFile(address);
- if (!file)
- return;
-
- Trace("DeleteBlock 0x%x", address.value());
-
- BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
- DeleteMapBlock(address.start_block(), address.num_blocks(), header);
-
- size_t size = address.BlockSize() * address.num_blocks();
- size_t offset = address.start_block() * address.BlockSize() +
- kBlockHeaderSize;
- if (deep)
- file->Write(zero_buffer_, size, offset);
-
- if (!header->num_entries) {
- // This file is now empty. Let's try to delete it.
- FileType type = Addr::RequiredFileType(header->entry_size);
- if (Addr::BlockSizeForFileType(RANKINGS) == header->entry_size)
- type = RANKINGS;
- RemoveEmptyFile(type);
- }
-}
-
bool BlockFiles::FixBlockFileHeader(MappedFile* file) {
BlockFileHeader* header = reinterpret_cast<BlockFileHeader*>(file->buffer());
int file_size = static_cast<int>(file->GetLength());
diff --git a/net/disk_cache/disk_cache_test_base.cc b/net/disk_cache/disk_cache_test_base.cc
index 6c9b91c..3860667 100644
--- a/net/disk_cache/disk_cache_test_base.cc
+++ b/net/disk_cache/disk_cache_test_base.cc
@@ -15,15 +15,25 @@ void DiskCacheTest::TearDown() {
MessageLoop::current()->RunAllPending();
}
-void DiskCacheTestWithCache::SetMaxSize(int size) {
- size_ = size;
- if (cache_impl_)
- EXPECT_TRUE(cache_impl_->SetMaxSize(size));
-
- if (mem_cache_)
- EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+DiskCacheTestWithCache::DiskCacheTestWithCache()
+ : cache_(NULL),
+ cache_impl_(NULL),
+ mem_cache_(NULL),
+ mask_(0),
+ size_(0),
+ type_(net::DISK_CACHE),
+ memory_only_(false),
+ implementation_(false),
+ force_creation_(false),
+ new_eviction_(false),
+ first_cleanup_(true),
+ integrity_(true),
+ use_current_thread_(false),
+ cache_thread_("CacheThread") {
}
+DiskCacheTestWithCache::~DiskCacheTestWithCache() {}
+
void DiskCacheTestWithCache::InitCache() {
if (mask_ || new_eviction_)
implementation_ = true;
@@ -38,86 +48,6 @@ void DiskCacheTestWithCache::InitCache() {
ASSERT_EQ(0, cache_->GetEntryCount());
}
-void DiskCacheTestWithCache::InitMemoryCache() {
- if (!implementation_) {
- cache_ = disk_cache::MemBackendImpl::CreateBackend(size_);
- return;
- }
-
- mem_cache_ = new disk_cache::MemBackendImpl();
- cache_ = mem_cache_;
- ASSERT_TRUE(NULL != cache_);
-
- if (size_)
- EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
-
- ASSERT_TRUE(mem_cache_->Init());
-}
-
-void DiskCacheTestWithCache::InitDiskCache() {
- FilePath path = GetCacheFilePath();
- if (first_cleanup_)
- ASSERT_TRUE(DeleteCache(path));
-
- if (!cache_thread_.IsRunning()) {
- EXPECT_TRUE(cache_thread_.StartWithOptions(
- base::Thread::Options(MessageLoop::TYPE_IO, 0)));
- }
- ASSERT_TRUE(cache_thread_.message_loop() != NULL);
-
- if (implementation_)
- return InitDiskCacheImpl(path);
-
- scoped_refptr<base::MessageLoopProxy> thread =
- use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
- cache_thread_.message_loop_proxy();
-
- TestCompletionCallback cb;
- int rv = disk_cache::BackendImpl::CreateBackend(
- path, force_creation_, size_, type_,
- disk_cache::kNoRandom, thread, NULL, &cache_, &cb);
- ASSERT_EQ(net::OK, cb.GetResult(rv));
-}
-
-void DiskCacheTestWithCache::InitDiskCacheImpl(const FilePath& path) {
- scoped_refptr<base::MessageLoopProxy> thread =
- use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
- cache_thread_.message_loop_proxy();
- if (mask_)
- cache_impl_ = new disk_cache::BackendImpl(path, mask_, thread, NULL);
- else
- cache_impl_ = new disk_cache::BackendImpl(path, thread, NULL);
-
- cache_ = cache_impl_;
- ASSERT_TRUE(NULL != cache_);
-
- if (size_)
- EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
-
- if (new_eviction_)
- cache_impl_->SetNewEviction();
-
- cache_impl_->SetType(type_);
- cache_impl_->SetFlags(disk_cache::kNoRandom);
- TestCompletionCallback cb;
- int rv = cache_impl_->Init(&cb);
- ASSERT_EQ(net::OK, cb.GetResult(rv));
-}
-
-void DiskCacheTestWithCache::TearDown() {
- MessageLoop::current()->RunAllPending();
- delete cache_;
- if (cache_thread_.IsRunning())
- cache_thread_.Stop();
-
- if (!memory_only_ && integrity_) {
- FilePath path = GetCacheFilePath();
- EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
- }
-
- PlatformTest::TearDown();
-}
-
// We are expected to leak memory when simulating crashes.
void DiskCacheTestWithCache::SimulateCrash() {
ASSERT_TRUE(implementation_ && !memory_only_);
@@ -138,6 +68,15 @@ void DiskCacheTestWithCache::SetTestMode() {
cache_impl_->SetUnitTestMode();
}
+void DiskCacheTestWithCache::SetMaxSize(int size) {
+ size_ = size;
+ if (cache_impl_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size));
+
+ if (mem_cache_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size));
+}
+
int DiskCacheTestWithCache::OpenEntry(const std::string& key,
disk_cache::Entry** entry) {
TestCompletionCallback cb;
@@ -236,3 +175,83 @@ int DiskCacheTestWithCache::WriteSparseData(disk_cache::Entry* entry,
int rv = entry->WriteSparseData(offset, buf, len, &cb);
return cb.GetResult(rv);
}
+
+void DiskCacheTestWithCache::TearDown() {
+ MessageLoop::current()->RunAllPending();
+ delete cache_;
+ if (cache_thread_.IsRunning())
+ cache_thread_.Stop();
+
+ if (!memory_only_ && integrity_) {
+ FilePath path = GetCacheFilePath();
+ EXPECT_TRUE(CheckCacheIntegrity(path, new_eviction_));
+ }
+
+ PlatformTest::TearDown();
+}
+
+void DiskCacheTestWithCache::InitMemoryCache() {
+ if (!implementation_) {
+ cache_ = disk_cache::MemBackendImpl::CreateBackend(size_);
+ return;
+ }
+
+ mem_cache_ = new disk_cache::MemBackendImpl();
+ cache_ = mem_cache_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(mem_cache_->SetMaxSize(size_));
+
+ ASSERT_TRUE(mem_cache_->Init());
+}
+
+void DiskCacheTestWithCache::InitDiskCache() {
+ FilePath path = GetCacheFilePath();
+ if (first_cleanup_)
+ ASSERT_TRUE(DeleteCache(path));
+
+ if (!cache_thread_.IsRunning()) {
+ EXPECT_TRUE(cache_thread_.StartWithOptions(
+ base::Thread::Options(MessageLoop::TYPE_IO, 0)));
+ }
+ ASSERT_TRUE(cache_thread_.message_loop() != NULL);
+
+ if (implementation_)
+ return InitDiskCacheImpl(path);
+
+ scoped_refptr<base::MessageLoopProxy> thread =
+ use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
+ cache_thread_.message_loop_proxy();
+
+ TestCompletionCallback cb;
+ int rv = disk_cache::BackendImpl::CreateBackend(
+ path, force_creation_, size_, type_,
+ disk_cache::kNoRandom, thread, NULL, &cache_, &cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+}
+
+void DiskCacheTestWithCache::InitDiskCacheImpl(const FilePath& path) {
+ scoped_refptr<base::MessageLoopProxy> thread =
+ use_current_thread_ ? base::MessageLoopProxy::CreateForCurrentThread() :
+ cache_thread_.message_loop_proxy();
+ if (mask_)
+ cache_impl_ = new disk_cache::BackendImpl(path, mask_, thread, NULL);
+ else
+ cache_impl_ = new disk_cache::BackendImpl(path, thread, NULL);
+
+ cache_ = cache_impl_;
+ ASSERT_TRUE(NULL != cache_);
+
+ if (size_)
+ EXPECT_TRUE(cache_impl_->SetMaxSize(size_));
+
+ if (new_eviction_)
+ cache_impl_->SetNewEviction();
+
+ cache_impl_->SetType(type_);
+ cache_impl_->SetFlags(disk_cache::kNoRandom);
+ TestCompletionCallback cb;
+ int rv = cache_impl_->Init(&cb);
+ ASSERT_EQ(net::OK, cb.GetResult(rv));
+}
diff --git a/net/disk_cache/disk_cache_test_base.h b/net/disk_cache/disk_cache_test_base.h
index 13b3879..0fd98b8 100644
--- a/net/disk_cache/disk_cache_test_base.h
+++ b/net/disk_cache/disk_cache_test_base.h
@@ -40,15 +40,10 @@ class DiskCacheTest : public PlatformTest {
// Provides basic support for cache related tests.
class DiskCacheTestWithCache : public DiskCacheTest {
protected:
- DiskCacheTestWithCache()
- : cache_(NULL), cache_impl_(NULL), mem_cache_(NULL), mask_(0), size_(0),
- type_(net::DISK_CACHE), memory_only_(false), implementation_(false),
- force_creation_(false), new_eviction_(false), first_cleanup_(true),
- integrity_(true), use_current_thread_(false),
- cache_thread_("CacheThread") {}
+ DiskCacheTestWithCache();
+ virtual ~DiskCacheTestWithCache();
void InitCache();
- virtual void TearDown();
void SimulateCrash();
void SetTestMode();
@@ -112,6 +107,9 @@ class DiskCacheTestWithCache : public DiskCacheTest {
int WriteSparseData(disk_cache::Entry* entry, int64 offset,
net::IOBuffer* buf, int len);
+ // DiskCacheTest:
+ virtual void TearDown();
+
// cache_ will always have a valid object, regardless of how the cache was
// initialized. The implementation pointers can be NULL.
disk_cache::Backend* cache_;
diff --git a/net/disk_cache/mem_entry_impl.cc b/net/disk_cache/mem_entry_impl.cc
index a9e599c..30599f4 100644
--- a/net/disk_cache/mem_entry_impl.cc
+++ b/net/disk_cache/mem_entry_impl.cc
@@ -48,12 +48,66 @@ MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
data_size_[i] = 0;
}
-MemEntryImpl::~MemEntryImpl() {
- for (int i = 0; i < NUM_STREAMS; i++)
- backend_->ModifyStorageSize(data_size_[i], 0);
- backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+// ------------------------------------------------------------------------
+
+bool MemEntryImpl::CreateEntry(const std::string& key) {
+ key_ = key;
+ Time current = Time::Now();
+ last_modified_ = current;
+ last_used_ = current;
+ Open();
+ backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
+ return true;
+}
+
+void MemEntryImpl::InternalDoom() {
+ doomed_ = true;
+ if (!ref_count_) {
+ if (type() == kParentEntry) {
+ // If this is a parent entry, we need to doom all the child entries.
+ if (children_.get()) {
+ EntryMap children;
+ children.swap(*children_);
+ for (EntryMap::iterator i = children.begin();
+ i != children.end(); ++i) {
+ // Since a pointer to this object is also saved in the map, avoid
+ // dooming it.
+ if (i->second != this)
+ i->second->Doom();
+ }
+ DCHECK(children_->size() == 0);
+ }
+ } else {
+ // If this is a child entry, detach it from the parent.
+ parent_->DetachChild(child_id_);
+ }
+ delete this;
+ }
+}
+
+void MemEntryImpl::Open() {
+ // Only a parent entry can be opened.
+ // TODO(hclam): make sure it's correct to not apply the concept of ref
+ // counting to child entry.
+ DCHECK(type() == kParentEntry);
+ ref_count_++;
+ DCHECK(ref_count_ >= 0);
+ DCHECK(!doomed_);
+}
+
+bool MemEntryImpl::InUse() {
+ if (type() == kParentEntry) {
+ return ref_count_ > 0;
+ } else {
+ // A child entry is always not in use. The consequence is that a child entry
+ // can always be evicted while the associated parent entry is currently in
+ // used (i.e. opened).
+ return false;
+ }
}
+// ------------------------------------------------------------------------
+
void MemEntryImpl::Doom() {
if (doomed_)
return;
@@ -263,6 +317,29 @@ int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
return io_buf->BytesConsumed();
}
+int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback) {
+ return GetAvailableRange(offset, len, start);
+}
+
+bool MemEntryImpl::CouldBeSparse() const {
+ DCHECK_EQ(kParentEntry, type());
+ return (children_.get() != NULL);
+}
+
+int MemEntryImpl::ReadyForSparseIO(
+ net::CompletionCallback* completion_callback) {
+ return net::OK;
+}
+
+// ------------------------------------------------------------------------
+
+MemEntryImpl::~MemEntryImpl() {
+ for (int i = 0; i < NUM_STREAMS; i++)
+ backend_->ModifyStorageSize(data_size_[i], 0);
+ backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
+}
+
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
DCHECK(type() == kParentEntry);
DCHECK(start);
@@ -307,81 +384,6 @@ int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
return 0;
}
-int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
- CompletionCallback* callback) {
- return GetAvailableRange(offset, len, start);
-}
-
-bool MemEntryImpl::CouldBeSparse() const {
- DCHECK_EQ(kParentEntry, type());
- return (children_.get() != NULL);
-}
-
-int MemEntryImpl::ReadyForSparseIO(
- net::CompletionCallback* completion_callback) {
- return net::OK;
-}
-
-// ------------------------------------------------------------------------
-
-bool MemEntryImpl::CreateEntry(const std::string& key) {
- key_ = key;
- Time current = Time::Now();
- last_modified_ = current;
- last_used_ = current;
- Open();
- backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
- return true;
-}
-
-void MemEntryImpl::InternalDoom() {
- doomed_ = true;
- if (!ref_count_) {
- if (type() == kParentEntry) {
- // If this is a parent entry, we need to doom all the child entries.
- if (children_.get()) {
- EntryMap children;
- children.swap(*children_);
- for (EntryMap::iterator i = children.begin();
- i != children.end(); ++i) {
- // Since a pointer to this object is also saved in the map, avoid
- // dooming it.
- if (i->second != this)
- i->second->Doom();
- }
- DCHECK(children_->size() == 0);
- }
- } else {
- // If this is a child entry, detach it from the parent.
- parent_->DetachChild(child_id_);
- }
- delete this;
- }
-}
-
-void MemEntryImpl::Open() {
- // Only a parent entry can be opened.
- // TODO(hclam): make sure it's correct to not apply the concept of ref
- // counting to child entry.
- DCHECK(type() == kParentEntry);
- ref_count_++;
- DCHECK(ref_count_ >= 0);
- DCHECK(!doomed_);
-}
-
-bool MemEntryImpl::InUse() {
- if (type() == kParentEntry) {
- return ref_count_ > 0;
- } else {
- // A child entry is always not in use. The consequence is that a child entry
- // can always be evicted while the associated parent entry is currently in
- // used (i.e. opened).
- return false;
- }
-}
-
-// ------------------------------------------------------------------------
-
void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
int entry_size = GetDataSize(index);
diff --git a/net/disk_cache/mem_entry_impl.h b/net/disk_cache/mem_entry_impl.h
index 573a306..f4ac4ef 100644
--- a/net/disk_cache/mem_entry_impl.h
+++ b/net/disk_cache/mem_entry_impl.h
@@ -52,28 +52,6 @@ class MemEntryImpl : public Entry {
explicit MemEntryImpl(MemBackendImpl* backend);
- // Entry interface.
- virtual void Doom();
- virtual void Close();
- virtual std::string GetKey() const;
- virtual base::Time GetLastUsed() const;
- virtual base::Time GetLastModified() const;
- virtual int32 GetDataSize(int index) const;
- virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* completion_callback);
- virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* completion_callback,
- bool truncate);
- virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* completion_callback);
- virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
- net::CompletionCallback* completion_callback);
- virtual int GetAvailableRange(int64 offset, int len, int64* start,
- CompletionCallback* callback);
- virtual bool CouldBeSparse() const;
- virtual void CancelSparseIO() {}
- virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
-
// Performs the initialization of a EntryImpl that will be added to the
// cache.
bool CreateEntry(const std::string& key);
@@ -104,6 +82,28 @@ class MemEntryImpl : public Entry {
return parent_ ? kChildEntry : kParentEntry;
}
+ // Entry interface.
+ virtual void Doom();
+ virtual void Close();
+ virtual std::string GetKey() const;
+ virtual base::Time GetLastUsed() const;
+ virtual base::Time GetLastModified() const;
+ virtual int32 GetDataSize(int index) const;
+ virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback,
+ bool truncate);
+ virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+ net::CompletionCallback* completion_callback);
+ virtual int GetAvailableRange(int64 offset, int len, int64* start,
+ CompletionCallback* callback);
+ virtual bool CouldBeSparse() const;
+ virtual void CancelSparseIO() {}
+ virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
+
private:
typedef base::hash_map<int, MemEntryImpl*> EntryMap;
diff --git a/net/disk_cache/stats_histogram.cc b/net/disk_cache/stats_histogram.cc
index 366a7e1..39c9056 100644
--- a/net/disk_cache/stats_histogram.cc
+++ b/net/disk_cache/stats_histogram.cc
@@ -15,6 +15,12 @@ using base::StatisticsRecorder;
// Static.
const Stats* StatsHistogram::stats_ = NULL;
+StatsHistogram::~StatsHistogram() {
+ // Only cleanup what we set.
+ if (init_)
+ stats_ = NULL;
+}
+
scoped_refptr<StatsHistogram> StatsHistogram::StatsHistogramFactoryGet(
const std::string& name) {
scoped_refptr<Histogram> histogram(NULL);
@@ -59,12 +65,6 @@ bool StatsHistogram::Init(const Stats* stats) {
return true;
}
-StatsHistogram::~StatsHistogram() {
- // Only cleanup what we set.
- if (init_)
- stats_ = NULL;
-}
-
Histogram::Sample StatsHistogram::ranges(size_t i) const {
DCHECK(stats_);
return stats_->GetBucketRange(i);
diff --git a/net/proxy/proxy_resolver_v8.h b/net/proxy/proxy_resolver_v8.h
index 28bdcd0..6cd340f 100644
--- a/net/proxy/proxy_resolver_v8.h
+++ b/net/proxy/proxy_resolver_v8.h
@@ -40,6 +40,8 @@ class ProxyResolverV8 : public ProxyResolver {
virtual ~ProxyResolverV8();
+ ProxyResolverJSBindings* js_bindings() const { return js_bindings_.get(); }
+
// ProxyResolver implementation:
virtual int GetProxyForURL(const GURL& url,
ProxyInfo* results,
@@ -54,8 +56,6 @@ class ProxyResolverV8 : public ProxyResolver {
const scoped_refptr<ProxyResolverScriptData>& script_data,
CompletionCallback* /*callback*/);
- ProxyResolverJSBindings* js_bindings() const { return js_bindings_.get(); }
-
private:
// Context holds the Javascript state for the most recently loaded PAC
// script. It corresponds with the data from the last call to
diff --git a/net/proxy/proxy_script_fetcher_impl.cc b/net/proxy/proxy_script_fetcher_impl.cc
index 7d22ed3..bc245a2 100644
--- a/net/proxy/proxy_script_fetcher_impl.cc
+++ b/net/proxy/proxy_script_fetcher_impl.cc
@@ -90,6 +90,30 @@ ProxyScriptFetcherImpl::~ProxyScriptFetcherImpl() {
// ensure that the delegate (this) is not called again.
}
+base::TimeDelta ProxyScriptFetcherImpl::SetTimeoutConstraint(
+ base::TimeDelta timeout) {
+ base::TimeDelta prev = max_duration_;
+ max_duration_ = timeout;
+ return prev;
+}
+
+size_t ProxyScriptFetcherImpl::SetSizeConstraint(size_t size_bytes) {
+ size_t prev = max_response_bytes_;
+ max_response_bytes_ = size_bytes;
+ return prev;
+}
+
+void ProxyScriptFetcherImpl::OnResponseCompleted(URLRequest* request) {
+ DCHECK_EQ(request, cur_request_.get());
+
+ // Use |result_code_| as the request's error if we have already set it to
+ // something specific.
+ if (result_code_ == OK && !request->status().is_success())
+ result_code_ = request->status().os_error();
+
+ FetchCompleted();
+}
+
int ProxyScriptFetcherImpl::Fetch(const GURL& url,
string16* text,
CompletionCallback* callback) {
@@ -200,17 +224,6 @@ void ProxyScriptFetcherImpl::OnReadCompleted(URLRequest* request,
}
}
-void ProxyScriptFetcherImpl::OnResponseCompleted(URLRequest* request) {
- DCHECK_EQ(request, cur_request_.get());
-
- // Use |result_code_| as the request's error if we have already set it to
- // something specific.
- if (result_code_ == OK && !request->status().is_success())
- result_code_ = request->status().os_error();
-
- FetchCompleted();
-}
-
void ProxyScriptFetcherImpl::ReadBody(URLRequest* request) {
// Read as many bytes as are available synchronously.
while (true) {
@@ -287,17 +300,4 @@ void ProxyScriptFetcherImpl::OnTimeout(int id) {
cur_request_->Cancel();
}
-base::TimeDelta ProxyScriptFetcherImpl::SetTimeoutConstraint(
- base::TimeDelta timeout) {
- base::TimeDelta prev = max_duration_;
- max_duration_ = timeout;
- return prev;
-}
-
-size_t ProxyScriptFetcherImpl::SetSizeConstraint(size_t size_bytes) {
- size_t prev = max_response_bytes_;
- max_response_bytes_ = size_bytes;
- return prev;
-}
-
} // namespace net
diff --git a/net/proxy/proxy_script_fetcher_impl.h b/net/proxy/proxy_script_fetcher_impl.h
index 65c16f7..e3abb55 100644
--- a/net/proxy/proxy_script_fetcher_impl.h
+++ b/net/proxy/proxy_script_fetcher_impl.h
@@ -37,8 +37,13 @@ class ProxyScriptFetcherImpl : public ProxyScriptFetcher,
virtual ~ProxyScriptFetcherImpl();
- // ProxyScriptFetcher methods:
+ // Used by unit-tests to modify the default limits.
+ base::TimeDelta SetTimeoutConstraint(base::TimeDelta timeout);
+ size_t SetSizeConstraint(size_t size_bytes);
+ virtual void OnResponseCompleted(URLRequest* request);
+
+ // ProxyScriptFetcher methods:
virtual int Fetch(const GURL& url, string16* text,
CompletionCallback* callback);
virtual void Cancel();
@@ -51,13 +56,10 @@ class ProxyScriptFetcherImpl : public ProxyScriptFetcher,
X509Certificate* cert);
virtual void OnResponseStarted(URLRequest* request);
virtual void OnReadCompleted(URLRequest* request, int num_bytes);
- virtual void OnResponseCompleted(URLRequest* request);
-
- // Used by unit-tests to modify the default limits.
- base::TimeDelta SetTimeoutConstraint(base::TimeDelta timeout);
- size_t SetSizeConstraint(size_t size_bytes);
private:
+ enum { kBufSize = 4096 };
+
// Read more bytes from the response.
void ReadBody(URLRequest* request);
@@ -83,7 +85,6 @@ class ProxyScriptFetcherImpl : public ProxyScriptFetcher,
URLRequestContext* url_request_context_;
// Buffer that URLRequest writes into.
- enum { kBufSize = 4096 };
scoped_refptr<IOBuffer> buf_;
// The next ID to use for |cur_request_| (monotonically increasing).
diff --git a/net/socket_stream/socket_stream.cc b/net/socket_stream/socket_stream.cc
index e3dc817..b65fa6e 100644
--- a/net/socket_stream/socket_stream.cc
+++ b/net/socket_stream/socket_stream.cc
@@ -39,12 +39,13 @@ static const int kReadBufferSize = 4096;
namespace net {
SocketStream::ResponseHeaders::ResponseHeaders() : IOBuffer() {}
-SocketStream::ResponseHeaders::~ResponseHeaders() { data_ = NULL; }
void SocketStream::ResponseHeaders::Realloc(size_t new_size) {
headers_.reset(static_cast<char*>(realloc(headers_.release(), new_size)));
}
+SocketStream::ResponseHeaders::~ResponseHeaders() { data_ = NULL; }
+
SocketStream::SocketStream(const GURL& url, Delegate* delegate)
: delegate_(delegate),
url_(url),
@@ -78,12 +79,6 @@ SocketStream::SocketStream(const GURL& url, Delegate* delegate)
DCHECK(delegate_);
}
-SocketStream::~SocketStream() {
- set_context(NULL);
- DCHECK(!delegate_);
- DCHECK(!pac_request_);
-}
-
SocketStream::UserData* SocketStream::GetUserData(
const void* key) const {
UserDataMap::const_iterator found = user_data_.find(key);
@@ -96,6 +91,10 @@ void SocketStream::SetUserData(const void* key, UserData* data) {
user_data_[key] = linked_ptr<UserData>(data);
}
+bool SocketStream::is_secure() const {
+ return url_.SchemeIs("wss");
+}
+
void SocketStream::set_context(URLRequestContext* context) {
scoped_refptr<URLRequestContext> prev_context = context_;
@@ -200,26 +199,6 @@ void SocketStream::Close() {
NewRunnableMethod(this, &SocketStream::DoClose));
}
-void SocketStream::DoClose() {
- closing_ = true;
- // If next_state_ is STATE_TCP_CONNECT, it's waiting other socket establishing
- // connection. If next_state_ is STATE_AUTH_REQUIRED, it's waiting for
- // restarting. In these states, we'll close the SocketStream now.
- if (next_state_ == STATE_TCP_CONNECT || next_state_ == STATE_AUTH_REQUIRED) {
- DoLoop(ERR_ABORTED);
- return;
- }
- // If next_state_ is STATE_READ_WRITE, we'll run DoLoop and close
- // the SocketStream.
- // If it's writing now, we should defer the closing after the current
- // writing is completed.
- if (next_state_ == STATE_READ_WRITE && !current_write_buf_)
- DoLoop(ERR_ABORTED);
-
- // In other next_state_, we'll wait for callback of other APIs, such as
- // ResolveProxy().
-}
-
void SocketStream::RestartWithAuth(
const string16& username, const string16& password) {
DCHECK(MessageLoop::current()) <<
@@ -255,6 +234,47 @@ void SocketStream::DetachDelegate() {
Close();
}
+void SocketStream::SetHostResolver(HostResolver* host_resolver) {
+ DCHECK(host_resolver);
+ host_resolver_ = host_resolver;
+}
+
+void SocketStream::SetClientSocketFactory(
+ ClientSocketFactory* factory) {
+ DCHECK(factory);
+ factory_ = factory;
+}
+
+SocketStream::~SocketStream() {
+ set_context(NULL);
+ DCHECK(!delegate_);
+ DCHECK(!pac_request_);
+}
+
+void SocketStream::CopyAddrInfo(struct addrinfo* head) {
+ addresses_.Copy(head, true);
+}
+
+void SocketStream::DoClose() {
+ closing_ = true;
+ // If next_state_ is STATE_TCP_CONNECT, it's waiting other socket establishing
+ // connection. If next_state_ is STATE_AUTH_REQUIRED, it's waiting for
+ // restarting. In these states, we'll close the SocketStream now.
+ if (next_state_ == STATE_TCP_CONNECT || next_state_ == STATE_AUTH_REQUIRED) {
+ DoLoop(ERR_ABORTED);
+ return;
+ }
+ // If next_state_ is STATE_READ_WRITE, we'll run DoLoop and close
+ // the SocketStream.
+ // If it's writing now, we should defer the closing after the current
+ // writing is completed.
+ if (next_state_ == STATE_READ_WRITE && !current_write_buf_)
+ DoLoop(ERR_ABORTED);
+
+ // In other next_state_, we'll wait for callback of other APIs, such as
+ // ResolveProxy().
+}
+
void SocketStream::Finish(int result) {
DCHECK(MessageLoop::current()) <<
"The current MessageLoop must exist";
@@ -277,21 +297,6 @@ void SocketStream::Finish(int result) {
Release();
}
-void SocketStream::SetHostResolver(HostResolver* host_resolver) {
- DCHECK(host_resolver);
- host_resolver_ = host_resolver;
-}
-
-void SocketStream::SetClientSocketFactory(
- ClientSocketFactory* factory) {
- DCHECK(factory);
- factory_ = factory;
-}
-
-void SocketStream::CopyAddrInfo(struct addrinfo* head) {
- addresses_.Copy(head, true);
-}
-
int SocketStream::DidEstablishConnection() {
if (!socket_.get() || !socket_->IsConnected()) {
next_state_ = STATE_CLOSE;
@@ -1023,10 +1028,6 @@ int SocketStream::HandleCertificateError(int result) {
return result;
}
-bool SocketStream::is_secure() const {
- return url_.SchemeIs("wss");
-}
-
SSLConfigService* SocketStream::ssl_config_service() const {
return context_->ssl_config_service();
}
diff --git a/net/socket_stream/socket_stream_metrics.h b/net/socket_stream/socket_stream_metrics.h
index 946a065..66a9ffd 100644
--- a/net/socket_stream/socket_stream_metrics.h
+++ b/net/socket_stream/socket_stream_metrics.h
@@ -18,25 +18,13 @@ namespace net {
class SocketStreamMetrics {
public:
- explicit SocketStreamMetrics(const GURL& url);
- ~SocketStreamMetrics();
-
- void OnWaitConnection();
- void OnStartConnection();
- void OnTunnelProxy();
- void OnSOCKSProxy();
- void OnSSLConnection();
- void OnConnected();
- void OnRead(int len);
- void OnWrite(int len);
- void OnClose();
-
enum ProtocolType {
PROTOCOL_UNKNOWN,
PROTOCOL_WEBSOCKET,
PROTOCOL_WEBSOCKET_SECURE,
NUM_PROTOCOL_TYPES,
};
+
enum ConnectionType {
CONNECTION_NONE,
ALL_CONNECTIONS,
@@ -46,6 +34,19 @@ class SocketStreamMetrics {
NUM_CONNECTION_TYPES,
};
+ explicit SocketStreamMetrics(const GURL& url);
+ ~SocketStreamMetrics();
+
+ void OnWaitConnection();
+ void OnStartConnection();
+ void OnTunnelProxy();
+ void OnSOCKSProxy();
+ void OnSSLConnection();
+ void OnConnected();
+ void OnRead(int len);
+ void OnWrite(int len);
+ void OnClose();
+
private:
void CountProtocolType(ProtocolType protocol_type);
void CountConnectionType(ConnectionType connection_type);
diff --git a/net/spdy/spdy_framer.cc b/net/spdy/spdy_framer.cc
index ed21610..9a3203b 100644
--- a/net/spdy/spdy_framer.cc
+++ b/net/spdy/spdy_framer.cc
@@ -19,8 +19,39 @@
#include "third_party/zlib/zlib.h"
#endif
+namespace {
+
+// The following compression setting are based on Brian Olson's analysis. See
+// https://groups.google.com/group/spdy-dev/browse_thread/thread/dfaf498542fac792
+// for more details.
+const int kCompressorLevel = 9;
+const int kCompressorWindowSizeInBits = 11;
+const int kCompressorMemLevel = 1;
+
+uLong dictionary_id = 0;
+
+} // namespace
+
namespace spdy {
+// This is just a hacked dictionary to use for shrinking HTTP-like headers.
+// TODO(mbelshe): Use a scientific methodology for computing the dictionary.
+const char SpdyFramer::kDictionary[] =
+ "optionsgetheadpostputdeletetraceacceptaccept-charsetaccept-encodingaccept-"
+ "languageauthorizationexpectfromhostif-modified-sinceif-matchif-none-matchi"
+ "f-rangeif-unmodifiedsincemax-forwardsproxy-authorizationrangerefererteuser"
+ "-agent10010120020120220320420520630030130230330430530630740040140240340440"
+ "5406407408409410411412413414415416417500501502503504505accept-rangesageeta"
+ "glocationproxy-authenticatepublicretry-afterservervarywarningwww-authentic"
+ "ateallowcontent-basecontent-encodingcache-controlconnectiondatetrailertran"
+ "sfer-encodingupgradeviawarningcontent-languagecontent-lengthcontent-locati"
+ "oncontent-md5content-rangecontent-typeetagexpireslast-modifiedset-cookieMo"
+ "ndayTuesdayWednesdayThursdayFridaySaturdaySundayJanFebMarAprMayJunJulAugSe"
+ "pOctNovDecchunkedtext/htmlimage/pngimage/jpgimage/gifapplication/xmlapplic"
+ "ation/xhtmltext/plainpublicmax-agecharset=iso-8859-1utf-8gzipdeflateHTTP/1"
+ ".1statusversionurl";
+const int SpdyFramer::kDictionarySize = arraysize(kDictionary);
+
// By default is compression on or off.
bool SpdyFramer::compression_default_ = true;
int SpdyFramer::spdy_version_ = kSpdyProtocolVersion;
@@ -71,92 +102,6 @@ SpdyFramer::~SpdyFramer() {
delete [] current_frame_buffer_;
}
-void SpdyFramer::Reset() {
- state_ = SPDY_RESET;
- error_code_ = SPDY_NO_ERROR;
- remaining_payload_ = 0;
- remaining_control_payload_ = 0;
- current_frame_len_ = 0;
- if (current_frame_capacity_ != kControlFrameBufferInitialSize) {
- delete [] current_frame_buffer_;
- current_frame_buffer_ = 0;
- current_frame_capacity_ = 0;
- ExpandControlFrameBuffer(kControlFrameBufferInitialSize);
- }
-}
-
-const char* SpdyFramer::StateToString(int state) {
- switch (state) {
- case SPDY_ERROR:
- return "ERROR";
- case SPDY_DONE:
- return "DONE";
- case SPDY_AUTO_RESET:
- return "AUTO_RESET";
- case SPDY_RESET:
- return "RESET";
- case SPDY_READING_COMMON_HEADER:
- return "READING_COMMON_HEADER";
- case SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER:
- return "INTERPRET_CONTROL_FRAME_COMMON_HEADER";
- case SPDY_CONTROL_FRAME_PAYLOAD:
- return "CONTROL_FRAME_PAYLOAD";
- case SPDY_IGNORE_REMAINING_PAYLOAD:
- return "IGNORE_REMAINING_PAYLOAD";
- case SPDY_FORWARD_STREAM_FRAME:
- return "FORWARD_STREAM_FRAME";
- }
- return "UNKNOWN_STATE";
-}
-
-size_t SpdyFramer::BytesSafeToRead() const {
- switch (state_) {
- case SPDY_ERROR:
- case SPDY_DONE:
- case SPDY_AUTO_RESET:
- case SPDY_RESET:
- return 0;
- case SPDY_READING_COMMON_HEADER:
- DCHECK_LT(current_frame_len_, SpdyFrame::size());
- return SpdyFrame::size() - current_frame_len_;
- case SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER:
- return 0;
- case SPDY_CONTROL_FRAME_PAYLOAD:
- case SPDY_IGNORE_REMAINING_PAYLOAD:
- case SPDY_FORWARD_STREAM_FRAME:
- return remaining_payload_;
- }
- // We should never get to here.
- return 0;
-}
-
-void SpdyFramer::set_error(SpdyError error) {
- DCHECK(visitor_);
- error_code_ = error;
- CHANGE_STATE(SPDY_ERROR);
- visitor_->OnError(this);
-}
-
-const char* SpdyFramer::ErrorCodeToString(int error_code) {
- switch (error_code) {
- case SPDY_NO_ERROR:
- return "NO_ERROR";
- case SPDY_INVALID_CONTROL_FRAME:
- return "INVALID_CONTROL_FRAME";
- case SPDY_CONTROL_PAYLOAD_TOO_LARGE:
- return "CONTROL_PAYLOAD_TOO_LARGE";
- case SPDY_ZLIB_INIT_FAILURE:
- return "ZLIB_INIT_FAILURE";
- case SPDY_UNSUPPORTED_VERSION:
- return "UNSUPPORTED_VERSION";
- case SPDY_DECOMPRESS_FAILURE:
- return "DECOMPRESS_FAILURE";
- case SPDY_COMPRESS_FAILURE:
- return "COMPRESS_FAILURE";
- }
- return "UNKNOWN_ERROR";
-}
-
size_t SpdyFramer::ProcessInput(const char* data, size_t len) {
DCHECK(visitor_);
DCHECK(data);
@@ -211,236 +156,18 @@ size_t SpdyFramer::ProcessInput(const char* data, size_t len) {
return original_len - len;
}
-size_t SpdyFramer::ProcessCommonHeader(const char* data, size_t len) {
- // This should only be called when we're in the SPDY_READING_COMMON_HEADER
- // state.
- DCHECK_EQ(state_, SPDY_READING_COMMON_HEADER);
-
- size_t original_len = len;
- SpdyFrame current_frame(current_frame_buffer_, false);
-
- do {
- if (current_frame_len_ < SpdyFrame::size()) {
- size_t bytes_desired = SpdyFrame::size() - current_frame_len_;
- size_t bytes_to_append = std::min(bytes_desired, len);
- char* header_buffer = current_frame_buffer_;
- memcpy(&header_buffer[current_frame_len_], data, bytes_to_append);
- current_frame_len_ += bytes_to_append;
- data += bytes_to_append;
- len -= bytes_to_append;
- // Check for an empty data frame.
- if (current_frame_len_ == SpdyFrame::size() &&
- !current_frame.is_control_frame() &&
- current_frame.length() == 0) {
- if (current_frame.flags() & CONTROL_FLAG_FIN) {
- SpdyDataFrame data_frame(current_frame_buffer_, false);
- visitor_->OnStreamFrameData(data_frame.stream_id(), NULL, 0);
- }
- CHANGE_STATE(SPDY_AUTO_RESET);
- }
- break;
- }
- remaining_payload_ = current_frame.length();
-
- // This is just a sanity check for help debugging early frame errors.
- if (remaining_payload_ > 1000000u) {
- LOG(WARNING) <<
- "Unexpectedly large frame. Spdy session is likely corrupt.";
- }
-
- // if we're here, then we have the common header all received.
- if (!current_frame.is_control_frame())
- CHANGE_STATE(SPDY_FORWARD_STREAM_FRAME);
- else
- CHANGE_STATE(SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER);
- } while (false);
-
- return original_len - len;
-}
-
-void SpdyFramer::ProcessControlFrameHeader() {
- DCHECK_EQ(SPDY_NO_ERROR, error_code_);
- DCHECK_LE(SpdyFrame::size(), current_frame_len_);
- SpdyControlFrame current_control_frame(current_frame_buffer_, false);
-
- // We check version before we check validity: version can never be 'invalid',
- // it can only be unsupported.
- if (current_control_frame.version() != spdy_version_) {
- set_error(SPDY_UNSUPPORTED_VERSION);
- return;
- }
-
- // Next up, check to see if we have valid data. This should be after version
- // checking (otherwise if the the type were out of bounds due to a version
- // upgrade we would misclassify the error) and before checking the type
- // (type can definitely be out of bounds)
- if (!current_control_frame.AppearsToBeAValidControlFrame()) {
- set_error(SPDY_INVALID_CONTROL_FRAME);
- return;
- }
-
- // Do some sanity checking on the control frame sizes.
- switch (current_control_frame.type()) {
- case SYN_STREAM:
- if (current_control_frame.length() <
- SpdySynStreamControlFrame::size() - SpdyControlFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case SYN_REPLY:
- if (current_control_frame.length() <
- SpdySynReplyControlFrame::size() - SpdyControlFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case RST_STREAM:
- if (current_control_frame.length() !=
- SpdyRstStreamControlFrame::size() - SpdyFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case SETTINGS:
- if (current_control_frame.length() <
- SpdySettingsControlFrame::size() - SpdyControlFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case NOOP:
- // NOOP. Swallow it.
- CHANGE_STATE(SPDY_AUTO_RESET);
- return;
- case GOAWAY:
- if (current_control_frame.length() !=
- SpdyGoAwayControlFrame::size() - SpdyFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case HEADERS:
- if (current_control_frame.length() <
- SpdyHeadersControlFrame::size() - SpdyControlFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- case WINDOW_UPDATE:
- if (current_control_frame.length() !=
- SpdyWindowUpdateControlFrame::size() - SpdyControlFrame::size())
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- default:
- LOG(WARNING) << "Valid spdy control frame with unknown type: "
- << current_control_frame.type();
- DCHECK(false);
- set_error(SPDY_INVALID_CONTROL_FRAME);
- break;
- }
-
- remaining_control_payload_ = current_control_frame.length();
- if (remaining_control_payload_ > kControlFrameBufferMaxSize) {
- set_error(SPDY_CONTROL_PAYLOAD_TOO_LARGE);
- return;
- }
-
- ExpandControlFrameBuffer(remaining_control_payload_);
- CHANGE_STATE(SPDY_CONTROL_FRAME_PAYLOAD);
-}
-
-size_t SpdyFramer::ProcessControlFramePayload(const char* data, size_t len) {
- size_t original_len = len;
- do {
- if (remaining_control_payload_) {
- size_t amount_to_consume = std::min(remaining_control_payload_, len);
- memcpy(&current_frame_buffer_[current_frame_len_], data,
- amount_to_consume);
- current_frame_len_ += amount_to_consume;
- data += amount_to_consume;
- len -= amount_to_consume;
- remaining_control_payload_ -= amount_to_consume;
- remaining_payload_ -= amount_to_consume;
- if (remaining_control_payload_)
- break;
- }
- SpdyControlFrame control_frame(current_frame_buffer_, false);
- visitor_->OnControl(&control_frame);
-
- // If this is a FIN, tell the caller.
- if (control_frame.type() == SYN_REPLY &&
- control_frame.flags() & CONTROL_FLAG_FIN) {
- visitor_->OnStreamFrameData(reinterpret_cast<SpdySynReplyControlFrame*>(
- &control_frame)->stream_id(),
- NULL, 0);
- }
-
- CHANGE_STATE(SPDY_IGNORE_REMAINING_PAYLOAD);
- } while (false);
- return original_len - len;
-}
-
-size_t SpdyFramer::ProcessDataFramePayload(const char* data, size_t len) {
- size_t original_len = len;
-
- SpdyDataFrame current_data_frame(current_frame_buffer_, false);
- if (remaining_payload_) {
- size_t amount_to_forward = std::min(remaining_payload_, len);
- if (amount_to_forward && state_ != SPDY_IGNORE_REMAINING_PAYLOAD) {
- if (current_data_frame.flags() & DATA_FLAG_COMPRESSED) {
- z_stream* decompressor =
- GetStreamDecompressor(current_data_frame.stream_id());
- if (!decompressor)
- return 0;
-
- size_t decompressed_max_size = amount_to_forward * 100;
- scoped_array<char> decompressed(new char[decompressed_max_size]);
- decompressor->next_in = reinterpret_cast<Bytef*>(
- const_cast<char*>(data));
- decompressor->avail_in = amount_to_forward;
- decompressor->next_out =
- reinterpret_cast<Bytef*>(decompressed.get());
- decompressor->avail_out = decompressed_max_size;
-
- int rv = inflate(decompressor, Z_SYNC_FLUSH);
- if (rv != Z_OK) {
- LOG(WARNING) << "inflate failure: " << rv;
- set_error(SPDY_DECOMPRESS_FAILURE);
- return 0;
- }
- size_t decompressed_size = decompressed_max_size -
- decompressor->avail_out;
-
- // Only inform the visitor if there is data.
- if (decompressed_size)
- visitor_->OnStreamFrameData(current_data_frame.stream_id(),
- decompressed.get(),
- decompressed_size);
- amount_to_forward -= decompressor->avail_in;
- } else {
- // The data frame was not compressed.
- // Only inform the visitor if there is data.
- if (amount_to_forward)
- visitor_->OnStreamFrameData(current_data_frame.stream_id(),
- data, amount_to_forward);
- }
- }
- data += amount_to_forward;
- len -= amount_to_forward;
- remaining_payload_ -= amount_to_forward;
-
- // If the FIN flag is set, and there is no more data in this data
- // frame, inform the visitor of EOF via a 0-length data frame.
- if (!remaining_payload_ &&
- current_data_frame.flags() & DATA_FLAG_FIN) {
- visitor_->OnStreamFrameData(current_data_frame.stream_id(), NULL, 0);
- CleanupDecompressorForStream(current_data_frame.stream_id());
- }
- } else {
- CHANGE_STATE(SPDY_AUTO_RESET);
+void SpdyFramer::Reset() {
+ state_ = SPDY_RESET;
+ error_code_ = SPDY_NO_ERROR;
+ remaining_payload_ = 0;
+ remaining_control_payload_ = 0;
+ current_frame_len_ = 0;
+ if (current_frame_capacity_ != kControlFrameBufferInitialSize) {
+ delete [] current_frame_buffer_;
+ current_frame_buffer_ = 0;
+ current_frame_capacity_ = 0;
+ ExpandControlFrameBuffer(kControlFrameBufferInitialSize);
}
- return original_len - len;
-}
-
-void SpdyFramer::ExpandControlFrameBuffer(size_t size) {
- size_t alloc_size = size + SpdyFrame::size();
- DCHECK_LT(alloc_size, kControlFrameBufferMaxSize);
- if (alloc_size <= current_frame_capacity_)
- return;
- char* new_buffer = new char[alloc_size];
- memcpy(new_buffer, current_frame_buffer_, current_frame_len_);
- delete [] current_frame_buffer_;
- current_frame_capacity_ = alloc_size;
- current_frame_buffer_ = new_buffer;
}
bool SpdyFramer::ParseHeaderBlock(const SpdyFrame* frame,
@@ -508,26 +235,6 @@ bool SpdyFramer::ParseHeaderBlock(const SpdyFrame* frame,
return false;
}
-/* static */
-bool SpdyFramer::ParseSettings(const SpdySettingsControlFrame* frame,
- SpdySettings* settings) {
- DCHECK_EQ(frame->type(), SETTINGS);
- DCHECK(settings);
-
- SpdyFrameBuilder parser(frame->header_block(), frame->header_block_len());
- void* iter = NULL;
- for (size_t index = 0; index < frame->num_entries(); ++index) {
- uint32 id;
- uint32 value;
- if (!parser.ReadUInt32(&iter, &id))
- return false;
- if (!parser.ReadUInt32(&iter, &value))
- return false;
- settings->insert(settings->end(), std::make_pair(id, value));
- }
- return true;
-}
-
SpdySynStreamControlFrame* SpdyFramer::CreateSynStream(
SpdyStreamId stream_id, SpdyStreamId associated_stream_id, int priority,
SpdyControlFlags flags, bool compressed, SpdyHeaderBlock* headers) {
@@ -727,6 +434,26 @@ SpdyWindowUpdateControlFrame* SpdyFramer::CreateWindowUpdate(
return reinterpret_cast<SpdyWindowUpdateControlFrame*>(frame.take());
}
+/* static */
+bool SpdyFramer::ParseSettings(const SpdySettingsControlFrame* frame,
+ SpdySettings* settings) {
+ DCHECK_EQ(frame->type(), SETTINGS);
+ DCHECK(settings);
+
+ SpdyFrameBuilder parser(frame->header_block(), frame->header_block_len());
+ void* iter = NULL;
+ for (size_t index = 0; index < frame->num_entries(); ++index) {
+ uint32 id;
+ uint32 value;
+ if (!parser.ReadUInt32(&iter, &id))
+ return false;
+ if (!parser.ReadUInt32(&iter, &value))
+ return false;
+ settings->insert(settings->end(), std::make_pair(id, value));
+ }
+ return true;
+}
+
SpdyDataFrame* SpdyFramer::CreateDataFrame(SpdyStreamId stream_id,
const char* data,
uint32 len, SpdyDataFlags flags) {
@@ -759,32 +486,318 @@ SpdyDataFrame* SpdyFramer::CreateDataFrame(SpdyStreamId stream_id,
return rv;
}
-// The following compression setting are based on Brian Olson's analysis. See
-// https://groups.google.com/group/spdy-dev/browse_thread/thread/dfaf498542fac792
-// for more details.
-static const int kCompressorLevel = 9;
-static const int kCompressorWindowSizeInBits = 11;
-static const int kCompressorMemLevel = 1;
+SpdyFrame* SpdyFramer::CompressFrame(const SpdyFrame& frame) {
+ if (frame.is_control_frame()) {
+ return CompressControlFrame(
+ reinterpret_cast<const SpdyControlFrame&>(frame));
+ }
+ return CompressDataFrame(reinterpret_cast<const SpdyDataFrame&>(frame));
+}
-// This is just a hacked dictionary to use for shrinking HTTP-like headers.
-// TODO(mbelshe): Use a scientific methodology for computing the dictionary.
-const char SpdyFramer::kDictionary[] =
- "optionsgetheadpostputdeletetraceacceptaccept-charsetaccept-encodingaccept-"
- "languageauthorizationexpectfromhostif-modified-sinceif-matchif-none-matchi"
- "f-rangeif-unmodifiedsincemax-forwardsproxy-authorizationrangerefererteuser"
- "-agent10010120020120220320420520630030130230330430530630740040140240340440"
- "5406407408409410411412413414415416417500501502503504505accept-rangesageeta"
- "glocationproxy-authenticatepublicretry-afterservervarywarningwww-authentic"
- "ateallowcontent-basecontent-encodingcache-controlconnectiondatetrailertran"
- "sfer-encodingupgradeviawarningcontent-languagecontent-lengthcontent-locati"
- "oncontent-md5content-rangecontent-typeetagexpireslast-modifiedset-cookieMo"
- "ndayTuesdayWednesdayThursdayFridaySaturdaySundayJanFebMarAprMayJunJulAugSe"
- "pOctNovDecchunkedtext/htmlimage/pngimage/jpgimage/gifapplication/xmlapplic"
- "ation/xhtmltext/plainpublicmax-agecharset=iso-8859-1utf-8gzipdeflateHTTP/1"
- ".1statusversionurl";
-const int SpdyFramer::kDictionarySize = arraysize(kDictionary);
+SpdyFrame* SpdyFramer::DecompressFrame(const SpdyFrame& frame) {
+ if (frame.is_control_frame()) {
+ return DecompressControlFrame(
+ reinterpret_cast<const SpdyControlFrame&>(frame));
+ }
+ return DecompressDataFrame(reinterpret_cast<const SpdyDataFrame&>(frame));
+}
+
+SpdyFrame* SpdyFramer::DuplicateFrame(const SpdyFrame& frame) {
+ int size = SpdyFrame::size() + frame.length();
+ SpdyFrame* new_frame = new SpdyFrame(size);
+ memcpy(new_frame->data(), frame.data(), size);
+ return new_frame;
+}
+
+bool SpdyFramer::IsCompressible(const SpdyFrame& frame) const {
+ // The important frames to compress are those which contain large
+ // amounts of compressible data - namely the headers in the SYN_STREAM
+ // and SYN_REPLY.
+ // TODO(mbelshe): Reconcile this with the spec when the spec is
+ // explicit about which frames compress and which do not.
+ if (frame.is_control_frame()) {
+ const SpdyControlFrame& control_frame =
+ reinterpret_cast<const SpdyControlFrame&>(frame);
+ return control_frame.type() == SYN_STREAM ||
+ control_frame.type() == SYN_REPLY;
+ }
+
+ const SpdyDataFrame& data_frame =
+ reinterpret_cast<const SpdyDataFrame&>(frame);
+ return (data_frame.flags() & DATA_FLAG_COMPRESSED) != 0;
+}
+
+const char* SpdyFramer::StateToString(int state) {
+ switch (state) {
+ case SPDY_ERROR:
+ return "ERROR";
+ case SPDY_DONE:
+ return "DONE";
+ case SPDY_AUTO_RESET:
+ return "AUTO_RESET";
+ case SPDY_RESET:
+ return "RESET";
+ case SPDY_READING_COMMON_HEADER:
+ return "READING_COMMON_HEADER";
+ case SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER:
+ return "INTERPRET_CONTROL_FRAME_COMMON_HEADER";
+ case SPDY_CONTROL_FRAME_PAYLOAD:
+ return "CONTROL_FRAME_PAYLOAD";
+ case SPDY_IGNORE_REMAINING_PAYLOAD:
+ return "IGNORE_REMAINING_PAYLOAD";
+ case SPDY_FORWARD_STREAM_FRAME:
+ return "FORWARD_STREAM_FRAME";
+ }
+ return "UNKNOWN_STATE";
+}
+
+const char* SpdyFramer::ErrorCodeToString(int error_code) {
+ switch (error_code) {
+ case SPDY_NO_ERROR:
+ return "NO_ERROR";
+ case SPDY_INVALID_CONTROL_FRAME:
+ return "INVALID_CONTROL_FRAME";
+ case SPDY_CONTROL_PAYLOAD_TOO_LARGE:
+ return "CONTROL_PAYLOAD_TOO_LARGE";
+ case SPDY_ZLIB_INIT_FAILURE:
+ return "ZLIB_INIT_FAILURE";
+ case SPDY_UNSUPPORTED_VERSION:
+ return "UNSUPPORTED_VERSION";
+ case SPDY_DECOMPRESS_FAILURE:
+ return "DECOMPRESS_FAILURE";
+ case SPDY_COMPRESS_FAILURE:
+ return "COMPRESS_FAILURE";
+ }
+ return "UNKNOWN_ERROR";
+}
-static uLong dictionary_id = 0;
+void SpdyFramer::set_enable_compression(bool value) {
+ enable_compression_ = value;
+}
+
+void SpdyFramer::set_enable_compression_default(bool value) {
+ compression_default_ = value;
+}
+
+size_t SpdyFramer::ProcessCommonHeader(const char* data, size_t len) {
+ // This should only be called when we're in the SPDY_READING_COMMON_HEADER
+ // state.
+ DCHECK_EQ(state_, SPDY_READING_COMMON_HEADER);
+
+ size_t original_len = len;
+ SpdyFrame current_frame(current_frame_buffer_, false);
+
+ do {
+ if (current_frame_len_ < SpdyFrame::size()) {
+ size_t bytes_desired = SpdyFrame::size() - current_frame_len_;
+ size_t bytes_to_append = std::min(bytes_desired, len);
+ char* header_buffer = current_frame_buffer_;
+ memcpy(&header_buffer[current_frame_len_], data, bytes_to_append);
+ current_frame_len_ += bytes_to_append;
+ data += bytes_to_append;
+ len -= bytes_to_append;
+ // Check for an empty data frame.
+ if (current_frame_len_ == SpdyFrame::size() &&
+ !current_frame.is_control_frame() &&
+ current_frame.length() == 0) {
+ if (current_frame.flags() & CONTROL_FLAG_FIN) {
+ SpdyDataFrame data_frame(current_frame_buffer_, false);
+ visitor_->OnStreamFrameData(data_frame.stream_id(), NULL, 0);
+ }
+ CHANGE_STATE(SPDY_AUTO_RESET);
+ }
+ break;
+ }
+ remaining_payload_ = current_frame.length();
+
+    // This is just a sanity check to help debug early frame errors.
+ if (remaining_payload_ > 1000000u) {
+ LOG(WARNING) <<
+ "Unexpectedly large frame. Spdy session is likely corrupt.";
+ }
+
+    // If we're here, we have received the entire common header.
+ if (!current_frame.is_control_frame())
+ CHANGE_STATE(SPDY_FORWARD_STREAM_FRAME);
+ else
+ CHANGE_STATE(SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER);
+ } while (false);
+
+ return original_len - len;
+}
+
+void SpdyFramer::ProcessControlFrameHeader() {
+ DCHECK_EQ(SPDY_NO_ERROR, error_code_);
+ DCHECK_LE(SpdyFrame::size(), current_frame_len_);
+ SpdyControlFrame current_control_frame(current_frame_buffer_, false);
+
+  // We check the version before we check validity: the version can never be
+  // 'invalid'; it can only be unsupported.
+ if (current_control_frame.version() != spdy_version_) {
+ set_error(SPDY_UNSUPPORTED_VERSION);
+ return;
+ }
+
+  // Next up, check to see if we have valid data. This should come after the
+  // version check (otherwise, if the type were out of bounds due to a version
+  // upgrade, we would misclassify the error) and before checking the type
+  // (the type can definitely be out of bounds).
+ if (!current_control_frame.AppearsToBeAValidControlFrame()) {
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ return;
+ }
+
+ // Do some sanity checking on the control frame sizes.
+ switch (current_control_frame.type()) {
+ case SYN_STREAM:
+ if (current_control_frame.length() <
+ SpdySynStreamControlFrame::size() - SpdyControlFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case SYN_REPLY:
+ if (current_control_frame.length() <
+ SpdySynReplyControlFrame::size() - SpdyControlFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case RST_STREAM:
+ if (current_control_frame.length() !=
+ SpdyRstStreamControlFrame::size() - SpdyFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case SETTINGS:
+ if (current_control_frame.length() <
+ SpdySettingsControlFrame::size() - SpdyControlFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case NOOP:
+ // NOOP. Swallow it.
+ CHANGE_STATE(SPDY_AUTO_RESET);
+ return;
+ case GOAWAY:
+ if (current_control_frame.length() !=
+ SpdyGoAwayControlFrame::size() - SpdyFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case HEADERS:
+ if (current_control_frame.length() <
+ SpdyHeadersControlFrame::size() - SpdyControlFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ case WINDOW_UPDATE:
+ if (current_control_frame.length() !=
+ SpdyWindowUpdateControlFrame::size() - SpdyControlFrame::size())
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ default:
+ LOG(WARNING) << "Valid spdy control frame with unknown type: "
+ << current_control_frame.type();
+ DCHECK(false);
+ set_error(SPDY_INVALID_CONTROL_FRAME);
+ break;
+ }
+
+ remaining_control_payload_ = current_control_frame.length();
+ if (remaining_control_payload_ > kControlFrameBufferMaxSize) {
+ set_error(SPDY_CONTROL_PAYLOAD_TOO_LARGE);
+ return;
+ }
+
+ ExpandControlFrameBuffer(remaining_control_payload_);
+ CHANGE_STATE(SPDY_CONTROL_FRAME_PAYLOAD);
+}
+
+size_t SpdyFramer::ProcessControlFramePayload(const char* data, size_t len) {
+ size_t original_len = len;
+ do {
+ if (remaining_control_payload_) {
+ size_t amount_to_consume = std::min(remaining_control_payload_, len);
+ memcpy(&current_frame_buffer_[current_frame_len_], data,
+ amount_to_consume);
+ current_frame_len_ += amount_to_consume;
+ data += amount_to_consume;
+ len -= amount_to_consume;
+ remaining_control_payload_ -= amount_to_consume;
+ remaining_payload_ -= amount_to_consume;
+ if (remaining_control_payload_)
+ break;
+ }
+ SpdyControlFrame control_frame(current_frame_buffer_, false);
+ visitor_->OnControl(&control_frame);
+
+ // If this is a FIN, tell the caller.
+ if (control_frame.type() == SYN_REPLY &&
+ control_frame.flags() & CONTROL_FLAG_FIN) {
+ visitor_->OnStreamFrameData(reinterpret_cast<SpdySynReplyControlFrame*>(
+ &control_frame)->stream_id(),
+ NULL, 0);
+ }
+
+ CHANGE_STATE(SPDY_IGNORE_REMAINING_PAYLOAD);
+ } while (false);
+ return original_len - len;
+}
+
+size_t SpdyFramer::ProcessDataFramePayload(const char* data, size_t len) {
+ size_t original_len = len;
+
+ SpdyDataFrame current_data_frame(current_frame_buffer_, false);
+ if (remaining_payload_) {
+ size_t amount_to_forward = std::min(remaining_payload_, len);
+ if (amount_to_forward && state_ != SPDY_IGNORE_REMAINING_PAYLOAD) {
+ if (current_data_frame.flags() & DATA_FLAG_COMPRESSED) {
+ z_stream* decompressor =
+ GetStreamDecompressor(current_data_frame.stream_id());
+ if (!decompressor)
+ return 0;
+
+ size_t decompressed_max_size = amount_to_forward * 100;
+ scoped_array<char> decompressed(new char[decompressed_max_size]);
+ decompressor->next_in = reinterpret_cast<Bytef*>(
+ const_cast<char*>(data));
+ decompressor->avail_in = amount_to_forward;
+ decompressor->next_out =
+ reinterpret_cast<Bytef*>(decompressed.get());
+ decompressor->avail_out = decompressed_max_size;
+
+ int rv = inflate(decompressor, Z_SYNC_FLUSH);
+ if (rv != Z_OK) {
+ LOG(WARNING) << "inflate failure: " << rv;
+ set_error(SPDY_DECOMPRESS_FAILURE);
+ return 0;
+ }
+ size_t decompressed_size = decompressed_max_size -
+ decompressor->avail_out;
+
+ // Only inform the visitor if there is data.
+ if (decompressed_size)
+ visitor_->OnStreamFrameData(current_data_frame.stream_id(),
+ decompressed.get(),
+ decompressed_size);
+ amount_to_forward -= decompressor->avail_in;
+ } else {
+ // The data frame was not compressed.
+ // Only inform the visitor if there is data.
+ if (amount_to_forward)
+ visitor_->OnStreamFrameData(current_data_frame.stream_id(),
+ data, amount_to_forward);
+ }
+ }
+ data += amount_to_forward;
+ len -= amount_to_forward;
+ remaining_payload_ -= amount_to_forward;
+
+ // If the FIN flag is set, and there is no more data in this data
+ // frame, inform the visitor of EOF via a 0-length data frame.
+ if (!remaining_payload_ &&
+ current_data_frame.flags() & DATA_FLAG_FIN) {
+ visitor_->OnStreamFrameData(current_data_frame.stream_id(), NULL, 0);
+ CleanupDecompressorForStream(current_data_frame.stream_id());
+ }
+ } else {
+ CHANGE_STATE(SPDY_AUTO_RESET);
+ }
+ return original_len - len;
+}
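
The compressed branch above relies on zlib's incremental inflate with Z_SYNC_FLUSH. A minimal standalone sketch of that pattern follows; the names and buffer handling are illustrative only and not taken from this change.

#include <zlib.h>

// Decompress one chunk of an already-initialized stream into |out| and report
// how many bytes were produced. Returns false on any zlib error.
bool InflateChunk(z_stream* stream, const char* in, size_t in_len,
                  char* out, size_t out_len, size_t* produced) {
  stream->next_in = reinterpret_cast<Bytef*>(const_cast<char*>(in));
  stream->avail_in = static_cast<uInt>(in_len);
  stream->next_out = reinterpret_cast<Bytef*>(out);
  stream->avail_out = static_cast<uInt>(out_len);
  // Z_SYNC_FLUSH makes zlib emit as much output as it can for the input
  // consumed so far, which is what allows frame-by-frame decompression.
  if (inflate(stream, Z_SYNC_FLUSH) != Z_OK)
    return false;
  *produced = out_len - stream->avail_out;
  return true;
}
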
z_stream* SpdyFramer::GetHeaderCompressor() {
if (header_compressor_.get())
@@ -873,74 +886,6 @@ z_stream* SpdyFramer::GetStreamDecompressor(SpdyStreamId stream_id) {
return stream_decompressors_[stream_id] = decompressor.release();
}
-bool SpdyFramer::GetFrameBoundaries(const SpdyFrame& frame,
- int* payload_length,
- int* header_length,
- const char** payload) const {
- size_t frame_size;
- if (frame.is_control_frame()) {
- const SpdyControlFrame& control_frame =
- reinterpret_cast<const SpdyControlFrame&>(frame);
- switch (control_frame.type()) {
- case SYN_STREAM:
- {
- const SpdySynStreamControlFrame& syn_frame =
- reinterpret_cast<const SpdySynStreamControlFrame&>(frame);
- frame_size = SpdySynStreamControlFrame::size();
- *payload_length = syn_frame.header_block_len();
- *header_length = frame_size;
- *payload = frame.data() + *header_length;
- }
- break;
- case SYN_REPLY:
- {
- const SpdySynReplyControlFrame& syn_frame =
- reinterpret_cast<const SpdySynReplyControlFrame&>(frame);
- frame_size = SpdySynReplyControlFrame::size();
- *payload_length = syn_frame.header_block_len();
- *header_length = frame_size;
- *payload = frame.data() + *header_length;
- }
- break;
- case HEADERS:
- {
- const SpdyHeadersControlFrame& headers_frame =
- reinterpret_cast<const SpdyHeadersControlFrame&>(frame);
- frame_size = SpdyHeadersControlFrame::size();
- *payload_length = headers_frame.header_block_len();
- *header_length = frame_size;
- *payload = frame.data() + *header_length;
- }
- break;
- default:
- // TODO(mbelshe): set an error?
- return false; // We can't compress this frame!
- }
- } else {
- frame_size = SpdyFrame::size();
- *header_length = frame_size;
- *payload_length = frame.length();
- *payload = frame.data() + SpdyFrame::size();
- }
- return true;
-}
-
-SpdyFrame* SpdyFramer::CompressFrame(const SpdyFrame& frame) {
- if (frame.is_control_frame()) {
- return CompressControlFrame(
- reinterpret_cast<const SpdyControlFrame&>(frame));
- }
- return CompressDataFrame(reinterpret_cast<const SpdyDataFrame&>(frame));
-}
-
-SpdyFrame* SpdyFramer::DecompressFrame(const SpdyFrame& frame) {
- if (frame.is_control_frame()) {
- return DecompressControlFrame(
- reinterpret_cast<const SpdyControlFrame&>(frame));
- }
- return DecompressDataFrame(reinterpret_cast<const SpdyDataFrame&>(frame));
-}
-
SpdyControlFrame* SpdyFramer::CompressControlFrame(
const SpdyControlFrame& frame) {
z_stream* compressor = GetHeaderCompressor();
@@ -950,6 +895,14 @@ SpdyControlFrame* SpdyFramer::CompressControlFrame(
CompressFrameWithZStream(frame, compressor));
}
+SpdyDataFrame* SpdyFramer::CompressDataFrame(const SpdyDataFrame& frame) {
+ z_stream* compressor = GetStreamCompressor(frame.stream_id());
+ if (!compressor)
+ return NULL;
+ return reinterpret_cast<SpdyDataFrame*>(
+ CompressFrameWithZStream(frame, compressor));
+}
+
SpdyControlFrame* SpdyFramer::DecompressControlFrame(
const SpdyControlFrame& frame) {
z_stream* decompressor = GetHeaderDecompressor();
@@ -959,14 +912,6 @@ SpdyControlFrame* SpdyFramer::DecompressControlFrame(
DecompressFrameWithZStream(frame, decompressor));
}
-SpdyDataFrame* SpdyFramer::CompressDataFrame(const SpdyDataFrame& frame) {
- z_stream* compressor = GetStreamCompressor(frame.stream_id());
- if (!compressor)
- return NULL;
- return reinterpret_cast<SpdyDataFrame*>(
- CompressFrameWithZStream(frame, compressor));
-}
-
SpdyDataFrame* SpdyFramer::DecompressDataFrame(const SpdyDataFrame& frame) {
z_stream* decompressor = GetStreamDecompressor(frame.stream_id());
if (!decompressor)
@@ -1145,37 +1090,96 @@ void SpdyFramer::CleanupStreamCompressorsAndDecompressors() {
stream_decompressors_.clear();
}
-SpdyFrame* SpdyFramer::DuplicateFrame(const SpdyFrame& frame) {
- int size = SpdyFrame::size() + frame.length();
- SpdyFrame* new_frame = new SpdyFrame(size);
- memcpy(new_frame->data(), frame.data(), size);
- return new_frame;
-}
-
-bool SpdyFramer::IsCompressible(const SpdyFrame& frame) const {
- // The important frames to compress are those which contain large
- // amounts of compressible data - namely the headers in the SYN_STREAM
- // and SYN_REPLY.
- // TODO(mbelshe): Reconcile this with the spec when the spec is
- // explicit about which frames compress and which do not.
- if (frame.is_control_frame()) {
- const SpdyControlFrame& control_frame =
- reinterpret_cast<const SpdyControlFrame&>(frame);
- return control_frame.type() == SYN_STREAM ||
- control_frame.type() == SYN_REPLY;
+size_t SpdyFramer::BytesSafeToRead() const {
+ switch (state_) {
+ case SPDY_ERROR:
+ case SPDY_DONE:
+ case SPDY_AUTO_RESET:
+ case SPDY_RESET:
+ return 0;
+ case SPDY_READING_COMMON_HEADER:
+ DCHECK_LT(current_frame_len_, SpdyFrame::size());
+ return SpdyFrame::size() - current_frame_len_;
+ case SPDY_INTERPRET_CONTROL_FRAME_COMMON_HEADER:
+ return 0;
+ case SPDY_CONTROL_FRAME_PAYLOAD:
+ case SPDY_IGNORE_REMAINING_PAYLOAD:
+ case SPDY_FORWARD_STREAM_FRAME:
+ return remaining_payload_;
}
+  // We should never get here.
+ return 0;
+}
- const SpdyDataFrame& data_frame =
- reinterpret_cast<const SpdyDataFrame&>(frame);
- return (data_frame.flags() & DATA_FLAG_COMPRESSED) != 0;
+void SpdyFramer::set_error(SpdyError error) {
+ DCHECK(visitor_);
+ error_code_ = error;
+ CHANGE_STATE(SPDY_ERROR);
+ visitor_->OnError(this);
}
-void SpdyFramer::set_enable_compression(bool value) {
- enable_compression_ = value;
+void SpdyFramer::ExpandControlFrameBuffer(size_t size) {
+ size_t alloc_size = size + SpdyFrame::size();
+ DCHECK_LT(alloc_size, kControlFrameBufferMaxSize);
+ if (alloc_size <= current_frame_capacity_)
+ return;
+ char* new_buffer = new char[alloc_size];
+ memcpy(new_buffer, current_frame_buffer_, current_frame_len_);
+ delete [] current_frame_buffer_;
+ current_frame_capacity_ = alloc_size;
+ current_frame_buffer_ = new_buffer;
}
-void SpdyFramer::set_enable_compression_default(bool value) {
- compression_default_ = value;
+bool SpdyFramer::GetFrameBoundaries(const SpdyFrame& frame,
+ int* payload_length,
+ int* header_length,
+ const char** payload) const {
+ size_t frame_size;
+ if (frame.is_control_frame()) {
+ const SpdyControlFrame& control_frame =
+ reinterpret_cast<const SpdyControlFrame&>(frame);
+ switch (control_frame.type()) {
+ case SYN_STREAM:
+ {
+ const SpdySynStreamControlFrame& syn_frame =
+ reinterpret_cast<const SpdySynStreamControlFrame&>(frame);
+ frame_size = SpdySynStreamControlFrame::size();
+ *payload_length = syn_frame.header_block_len();
+ *header_length = frame_size;
+ *payload = frame.data() + *header_length;
+ }
+ break;
+ case SYN_REPLY:
+ {
+ const SpdySynReplyControlFrame& syn_frame =
+ reinterpret_cast<const SpdySynReplyControlFrame&>(frame);
+ frame_size = SpdySynReplyControlFrame::size();
+ *payload_length = syn_frame.header_block_len();
+ *header_length = frame_size;
+ *payload = frame.data() + *header_length;
+ }
+ break;
+ case HEADERS:
+ {
+ const SpdyHeadersControlFrame& headers_frame =
+ reinterpret_cast<const SpdyHeadersControlFrame&>(frame);
+ frame_size = SpdyHeadersControlFrame::size();
+ *payload_length = headers_frame.header_block_len();
+ *header_length = frame_size;
+ *payload = frame.data() + *header_length;
+ }
+ break;
+ default:
+ // TODO(mbelshe): set an error?
+ return false; // We can't compress this frame!
+ }
+ } else {
+ frame_size = SpdyFrame::size();
+ *header_length = frame_size;
+ *payload_length = frame.length();
+ *payload = frame.data() + SpdyFrame::size();
+ }
+ return true;
}
} // namespace spdy
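
For context, a rough usage sketch of how a caller drives the parsing state machine implemented by the Process* methods in this file. The read loop and ReadFromSocket() are hypothetical; set_visitor() and ProcessInput() are the framer's public entry points, and the return value of ProcessInput() (bytes consumed) is ignored here for brevity.

SpdyFramer framer;
framer.set_visitor(&my_visitor);  // my_visitor implements SpdyFramerVisitorInterface.
char buf[4096];
while (size_t n = ReadFromSocket(buf, sizeof(buf))) {  // Hypothetical read loop.
  framer.ProcessInput(buf, n);  // Parsed frames are delivered to the visitor.
  if (framer.HasError())
    break;
}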