author    eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-09-02 07:19:03 +0000
committer eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-09-02 07:19:03 +0000
commit    5d7b373e7feee0fafc4d1cd38b3e38f2a969da90 (patch)
tree      e2daf386b3d8b82b789a5d2ac0b6093a559a18b3 /net
parent    7f1768a00fc5bfc03f35df04c11677bca12a7377 (diff)
Keep track of the live URLRequest instances, and the last 25 that were destroyed.
This functionality will be used by the "about:net-internal" page to display profiling information for the in-progress requests, as well as the recently completed requests.

This does not have any performance impact. Note that all of the tracking operations are constant time. In particular:

* Global tracking of all URLRequest instances is done by chaining them as linked list nodes, so we get constant-time insertion/deletion without needing to do any extra heap allocs.

* The recent requests list is a circular queue, backed by an array, so insertions are constant time (we never erase entries, just overwrite them). Moreover, each entry is just a {GURL, LoadLog*} pair, so very little copying actually happens -- LoadLog is refcounted so the copy is cheap, and GURL is backed by a std::string which is also refcounted, so that copy is cheap too.

R=darin
BUG=http://crbug.com/14478
TEST=unittests.

Review URL: http://codereview.chromium.org/173175

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@25158 0039d316-1c4b-4281-b951-d872f2087c98
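To make the constant-time claim concrete, here is a minimal standard-C++ sketch of the circular-queue technique described above (Graveyard, RecentInfo, and kMaxEntries are illustrative stand-ins for the patch's InstanceTracker, RecentRequestInfo, and kMaxGraveyardSize):

#include <cstddef>
#include <string>
#include <vector>

// A bounded "graveyard": a circular queue backed by a flat array.
struct RecentInfo {
  std::string url;  // stands in for the patch's {GURL, LoadLog*} pair
};

class Graveyard {
 public:
  static const size_t kMaxEntries = 25;

  Graveyard() : next_index_(0) {}

  // O(1): grow until full, then overwrite the oldest slot in place.
  void Insert(const RecentInfo& info) {
    if (entries_.size() < kMaxEntries)
      entries_.push_back(info);      // still growing toward capacity
    else
      entries_[next_index_] = info;  // full: overwrite the oldest entry
    next_index_ = (next_index_ + 1) % kMaxEntries;
  }

  // Copies the entries out, ordered oldest to newest. Once the queue
  // has wrapped, |next_index_| is also the index of the oldest entry.
  std::vector<RecentInfo> Snapshot() const {
    std::vector<RecentInfo> result;
    for (size_t i = 0; i < entries_.size(); ++i)
      result.push_back(entries_[(next_index_ + i) % entries_.size()]);
    return result;
  }

 private:
  size_t next_index_;
  std::vector<RecentInfo> entries_;
};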
Diffstat (limited to 'net')
-rw-r--r--  net/url_request/url_request.cc           125
-rw-r--r--  net/url_request/url_request.h             80
-rw-r--r--  net/url_request/url_request_unittest.cc  153
3 files changed, 258 insertions, 100 deletions
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index bf8fd5a..e41965c 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -4,6 +4,7 @@
#include "net/url_request/url_request.h"
+#include "base/compiler_specific.h"
#include "base/message_loop.h"
#include "base/singleton.h"
#include "base/stats_counters.h"
@@ -19,10 +20,6 @@
#include "net/url_request/url_request_job.h"
#include "net/url_request/url_request_job_manager.h"
-#ifndef NDEBUG
-URLRequestMetrics url_request_metrics;
-#endif
-
using base::Time;
using net::UploadData;
using std::string;
@@ -36,6 +33,97 @@ static URLRequestJobManager* GetJobManager() {
}
///////////////////////////////////////////////////////////////////////////////
+// URLRequest::InstanceTracker
+
+const size_t URLRequest::InstanceTracker::kMaxGraveyardSize = 25;
+const size_t URLRequest::InstanceTracker::kMaxGraveyardURLSize = 1000;
+
+URLRequest::InstanceTracker::~InstanceTracker() {
+ base::LeakTracker<URLRequest>::CheckForLeaks();
+
+ // Check in release mode as well, since we have the info.
+ CHECK(0u == GetLiveRequests().size());
+}
+
+// static
+URLRequest::InstanceTracker* URLRequest::InstanceTracker::Get() {
+ return Singleton<InstanceTracker>::get();
+}
+
+std::vector<URLRequest*> URLRequest::InstanceTracker::GetLiveRequests() {
+ std::vector<URLRequest*> list;
+ for (base::LinkNode<InstanceTrackerNode>* node = live_instances_.head();
+ node != live_instances_.end();
+ node = node->next()) {
+ URLRequest* url_request = node->value()->url_request();
+ list.push_back(url_request);
+ }
+ return list;
+}
+
+void URLRequest::InstanceTracker::ClearRecentlyDeceased() {
+ next_graveyard_index_ = 0;
+ graveyard_.clear();
+}
+
+const URLRequest::InstanceTracker::RecentRequestInfoList
+URLRequest::InstanceTracker::GetRecentlyDeceased() {
+ RecentRequestInfoList list;
+
+ // Copy the items from |graveyard_| (our circular queue of recently
+ // deceased request infos) into a vector, ordered from oldest to
+ // newest.
+ for (size_t i = 0; i < graveyard_.size(); ++i) {
+ size_t index = (next_graveyard_index_ + i) % graveyard_.size();
+ list.push_back(graveyard_[index]);
+ }
+ return list;
+}
+
+URLRequest::InstanceTracker::InstanceTracker() : next_graveyard_index_(0) {}
+
+void URLRequest::InstanceTracker::Add(InstanceTrackerNode* node) {
+ live_instances_.Append(node);
+}
+
+void URLRequest::InstanceTracker::Remove(InstanceTrackerNode* node) {
+ // Remove from |live_instances_|.
+ node->RemoveFromList();
+
+ // Add into |graveyard_|.
+ InsertIntoGraveyard(ExtractInfo(node->url_request()));
+}
+
+// static
+const URLRequest::InstanceTracker::RecentRequestInfo
+URLRequest::InstanceTracker::ExtractInfo(URLRequest* url_request) {
+ RecentRequestInfo info;
+ info.original_url = url_request->original_url();
+ info.load_log = url_request->load_log();
+
+ // Paranoia check: truncate really big URLs.
+ if (info.original_url.spec().size() > kMaxGraveyardURLSize) {
+ info.original_url = GURL(url_request->original_url().spec().substr(
+ 0, kMaxGraveyardURLSize));
+ }
+ return info;
+}
+
+void URLRequest::InstanceTracker::InsertIntoGraveyard(
+ const RecentRequestInfo& info) {
+ if (graveyard_.size() < kMaxGraveyardSize) {
+ // Still growing to maximum capacity.
+ DCHECK_EQ(next_graveyard_index_, graveyard_.size());
+ graveyard_.push_back(info);
+ } else {
+ // At maximum capacity, overwrite the oldest entry.
+ graveyard_[next_graveyard_index_] = info;
+ }
+
+ next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
+}
+
+///////////////////////////////////////////////////////////////////////////////
// URLRequest
URLRequest::URLRequest(const GURL& url, Delegate* delegate)
@@ -49,8 +137,8 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
enable_profiling_(false),
redirect_limit_(kMaxRedirects),
final_upload_progress_(0),
- priority_(0) {
- URLREQUEST_COUNT_CTOR();
+ priority_(0),
+ ALLOW_THIS_IN_INITIALIZER_LIST(instance_tracker_node_(this)) {
SIMPLE_STATS_COUNTER("URLRequestCount");
// Sanity check our environment.
@@ -61,8 +149,6 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
}
URLRequest::~URLRequest() {
- URLREQUEST_COUNT_DTOR();
-
Cancel();
if (job_)
@@ -256,6 +342,20 @@ void URLRequest::Start() {
StartJob(GetJobManager()->CreateJob(this));
}
+///////////////////////////////////////////////////////////////////////////////
+// URLRequest::InstanceTrackerNode
+
+URLRequest::InstanceTrackerNode::
+InstanceTrackerNode(URLRequest* url_request) : url_request_(url_request) {
+ InstanceTracker::Get()->Add(this);
+}
+
+URLRequest::InstanceTrackerNode::~InstanceTrackerNode() {
+ InstanceTracker::Get()->Remove(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
void URLRequest::StartJob(URLRequestJob* job) {
DCHECK(!is_pending_);
DCHECK(!job_);
@@ -504,12 +604,3 @@ URLRequest::UserData* URLRequest::GetUserData(const void* key) const {
void URLRequest::SetUserData(const void* key, UserData* data) {
user_data_[key] = linked_ptr<UserData>(data);
}
-
-#ifndef NDEBUG
-
-URLRequestMetrics::~URLRequestMetrics() {
- DLOG_IF(WARNING, object_count != 0) <<
- "Leaking " << object_count << " URLRequest object(s)";
-}
-
-#endif
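The InstanceTrackerNode constructor/destructor pair above is what makes live-instance registration constant time. A rough standalone analogue of the pattern, with std::list standing in for base::LinkedList/LinkNode and the stored iterator playing the role of the intrusive link node (Tracker and Request are illustrative names, not from the patch):

#include <list>

class Request;

class Tracker {
 public:
  static Tracker* Get() {
    static Tracker instance;  // stand-in for Singleton<InstanceTracker>::get()
    return &instance;
  }

  // O(1) insertion at the tail. The caller keeps the returned iterator,
  // which stays valid until erased, so removal needs no search.
  std::list<Request*>::iterator Add(Request* request) {
    return live_.insert(live_.end(), request);
  }

  // O(1) removal via the stored iterator.
  void Remove(std::list<Request*>::iterator it) { live_.erase(it); }

 private:
  std::list<Request*> live_;
};

class Request {
 public:
  Request() : self_(Tracker::Get()->Add(this)) {}  // register at construction
  ~Request() { Tracker::Get()->Remove(self_); }    // unregister at destruction

 private:
  std::list<Request*>::iterator self_;
};

The patch goes one step further than this sketch: by embedding the link node (instance_tracker_node_) directly in URLRequest, it avoids even the per-node heap allocation that std::list performs.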
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index 14c6287..f45fa07 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -10,6 +10,7 @@
#include <vector>
#include "base/leak_tracker.h"
+#include "base/linked_list.h"
#include "base/linked_ptr.h"
#include "base/logging.h"
#include "base/ref_counted.h"
@@ -202,6 +203,8 @@ class URLRequest {
virtual void OnReadCompleted(URLRequest* request, int bytes_read) = 0;
};
+ class InstanceTracker;
+
// Initialize an URL request.
URLRequest(const GURL& url, Delegate* delegate);
@@ -524,6 +527,19 @@ class URLRequest {
private:
friend class URLRequestJob;
+ // Helper class to make URLRequest insertable into a base::LinkedList,
+ // without making the public interface expose base::LinkNode.
+ class InstanceTrackerNode : public base::LinkNode<InstanceTrackerNode> {
+ public:
+ InstanceTrackerNode(URLRequest* url_request);
+ ~InstanceTrackerNode();
+
+ URLRequest* url_request() const { return url_request_; }
+
+ private:
+ URLRequest* url_request_;
+ };
+
void StartJob(URLRequestJob* job);
// Restarting involves replacing the current job with a new one such as what
@@ -597,34 +613,64 @@ class URLRequest {
// this to determine which URLRequest to allocate sockets to first.
int priority_;
+ InstanceTrackerNode instance_tracker_node_;
base::LeakTracker<URLRequest> leak_tracker_;
DISALLOW_COPY_AND_ASSIGN(URLRequest);
};
-//-----------------------------------------------------------------------------
-// To help ensure that all requests are cleaned up properly, we keep static
-// counters of live objects. TODO(darin): Move this leak checking stuff into
-// a common place and generalize it so it can be used everywhere (Bug 566229).
+// ----------------------------------------------------------------------
+// Singleton to track all of the live instances of URLRequest, and
+// keep a circular queue of the LoadLogs for recently deceased requests.
+//
+class URLRequest::InstanceTracker {
+ public:
+ struct RecentRequestInfo {
+ GURL original_url;
+ scoped_refptr<net::LoadLog> load_log;
+ };
-#ifndef NDEBUG
+ typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
-struct URLRequestMetrics {
- int object_count;
- URLRequestMetrics() : object_count(0) {}
- ~URLRequestMetrics();
-};
+ // The maximum number of entries for |graveyard_|.
+ static const size_t kMaxGraveyardSize;
-extern URLRequestMetrics url_request_metrics;
+ // The maximum size of URLs to stuff into RecentRequestInfo.
+ static const size_t kMaxGraveyardURLSize;
-#define URLREQUEST_COUNT_CTOR() url_request_metrics.object_count++
-#define URLREQUEST_COUNT_DTOR() url_request_metrics.object_count--
+ ~InstanceTracker();
-#else // disable leak checking in release builds...
+ // Returns the singleton instance of InstanceTracker.
+ static InstanceTracker* Get();
-#define URLREQUEST_COUNT_CTOR()
-#define URLREQUEST_COUNT_DTOR()
+ // Returns a list of URLRequests that are alive.
+ std::vector<URLRequest*> GetLiveRequests();
-#endif // #ifndef NDEBUG
+ // Clears the circular buffer of RecentRequestInfos.
+ void ClearRecentlyDeceased();
+
+ // Returns a list of recently completed URLRequests.
+ const RecentRequestInfoList GetRecentlyDeceased();
+
+ private:
+ friend class URLRequest;
+ friend struct DefaultSingletonTraits<InstanceTracker>;
+
+ InstanceTracker();
+
+ void Add(InstanceTrackerNode* node);
+ void Remove(InstanceTrackerNode* node);
+
+ // Copy the goodies out of |url_request| that we want to show the
+ // user later on the about:net-internal page.
+ static const RecentRequestInfo ExtractInfo(URLRequest* url_request);
+
+ void InsertIntoGraveyard(const RecentRequestInfo& info);
+
+ base::LinkedList<InstanceTrackerNode> live_instances_;
+
+ size_t next_graveyard_index_;
+ RecentRequestInfoList graveyard_;
+};
#endif // NET_URL_REQUEST_URL_REQUEST_H_
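Given the interface declared above, a consumer such as the about:net-internal page could query the tracker along these lines (a hypothetical caller, not part of this patch; DumpRequestsForDebugging is an illustrative name):

#include <cstddef>
#include <vector>

#include "base/logging.h"
#include "net/url_request/url_request.h"

void DumpRequestsForDebugging() {
  URLRequest::InstanceTracker* tracker = URLRequest::InstanceTracker::Get();

  // In-progress requests, in list (insertion) order.
  std::vector<URLRequest*> live = tracker->GetLiveRequests();
  for (size_t i = 0; i < live.size(); ++i)
    LOG(INFO) << "live: " << live[i]->original_url().spec();

  // Recently completed requests, ordered oldest to newest; the tracker
  // retains at most kMaxGraveyardSize (25) of them.
  URLRequest::InstanceTracker::RecentRequestInfoList dead =
      tracker->GetRecentlyDeceased();
  for (size_t i = 0; i < dead.size(); ++i)
    LOG(INFO) << "done: " << dead[i].original_url.spec();
}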
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
index 640174f..f4a795b 100644
--- a/net/url_request/url_request_unittest.cc
+++ b/net/url_request/url_request_unittest.cc
@@ -117,6 +117,10 @@ scoped_refptr<net::UploadData> CreateSimpleUploadData(const char* data) {
// Inherit PlatformTest since we require the autorelease pool on Mac OS X.
class URLRequestTest : public PlatformTest {
+ public:
+ ~URLRequestTest() {
+ EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
+ }
};
class URLRequestTestHTTP : public URLRequestTest {
@@ -199,9 +203,6 @@ TEST_F(URLRequestTestHTTP, GetTest_NoCache) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_NE(0, d.bytes_received());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, GetTest) {
@@ -219,9 +220,86 @@ TEST_F(URLRequestTestHTTP, GetTest) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_NE(0, d.bytes_received());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
+}
+
+// Test the instance tracking functionality of URLRequest.
+TEST_F(URLRequestTest, Tracking) {
+ URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
+ EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
+ EXPECT_EQ(0u,
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
+
+ {
+ URLRequest req1(GURL("http://req1"), NULL);
+ URLRequest req2(GURL("http://req2"), NULL);
+ URLRequest req3(GURL("http://req3"), NULL);
+
+ std::vector<URLRequest*> live_reqs =
+ URLRequest::InstanceTracker::Get()->GetLiveRequests();
+ ASSERT_EQ(3u, live_reqs.size());
+ EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url());
+ EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url());
+ EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url());
+ }
+
+ EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
+
+ URLRequest::InstanceTracker::RecentRequestInfoList recent_reqs =
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased();
+
+ // Note that the order is reversed from definition order, because
+ // this matches the destructor order.
+ ASSERT_EQ(3u, recent_reqs.size());
+ EXPECT_EQ(GURL("http://req3"), recent_reqs[0].original_url);
+ EXPECT_EQ(GURL("http://req2"), recent_reqs[1].original_url);
+ EXPECT_EQ(GURL("http://req1"), recent_reqs[2].original_url);
+}
+
+// Test that the graveyard of recently deceased requests stays bounded.
+TEST_F(URLRequestTest, TrackingGraveyardBounded) {
+ URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
+ EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
+  EXPECT_EQ(0u,
+            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
+
+ const size_t kMaxGraveyardSize =
+ URLRequest::InstanceTracker::kMaxGraveyardSize;
+ const size_t kMaxURLLen = URLRequest::InstanceTracker::kMaxGraveyardURLSize;
+
+ // Add twice as many requests as will fit in the graveyard.
+ for (size_t i = 0; i < kMaxGraveyardSize * 2; ++i)
+    URLRequest req(GURL(StringPrintf("http://req%d",
+                                     static_cast<int>(i)).c_str()), NULL);
+
+ // Check that only the last |kMaxGraveyardSize| requests are in-memory.
+
+ URLRequest::InstanceTracker::RecentRequestInfoList recent_reqs =
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased();
+
+ ASSERT_EQ(kMaxGraveyardSize, recent_reqs.size());
+
+ for (size_t i = 0; i < kMaxGraveyardSize; ++i) {
+ size_t req_number = i + kMaxGraveyardSize;
+    GURL url(StringPrintf("http://req%d",
+                          static_cast<int>(req_number)).c_str());
+ EXPECT_EQ(url, recent_reqs[i].original_url);
+ }
+
+ URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
+ EXPECT_EQ(0u,
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
+
+ // Check that very long URLs are truncated.
+ std::string big_url_spec("http://");
+ big_url_spec.resize(2 * kMaxURLLen, 'x');
+ GURL big_url(big_url_spec);
+ {
+ URLRequest req(big_url, NULL);
+ }
+ ASSERT_EQ(1u,
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
+ // The +1 is because GURL canonicalizes with a trailing '/' ... maybe
+ // we should just save the std::string rather than the GURL.
+ EXPECT_EQ(kMaxURLLen + 1,
+ URLRequest::InstanceTracker::Get()->GetRecentlyDeceased()[0]
+ .original_url.spec().size());
}
TEST_F(URLRequestTestHTTP, SetExplicitlyAllowedPortsTest) {
@@ -248,10 +326,6 @@ TEST_F(URLRequestTest, QuitTest) {
ASSERT_TRUE(NULL != server.get());
server->SendQuit();
EXPECT_TRUE(server->WaitToFinish(20000));
-
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
class HTTPSRequestTest : public testing::Test {
@@ -314,9 +388,6 @@ TEST_F(HTTPSRequestTest, MAYBE_HTTPSGetTest) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_NE(0, d.bytes_received());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(HTTPSRequestTest, MAYBE_HTTPSMismatchedTest) {
@@ -395,9 +466,6 @@ TEST_F(URLRequestTestHTTP, CancelTest) {
EXPECT_EQ(0, d.bytes_received());
EXPECT_FALSE(d.received_data_before_response());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, CancelTest2) {
@@ -422,9 +490,6 @@ TEST_F(URLRequestTestHTTP, CancelTest2) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, CancelTest3) {
@@ -448,9 +513,6 @@ TEST_F(URLRequestTestHTTP, CancelTest3) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, CancelTest4) {
@@ -505,10 +567,6 @@ TEST_F(URLRequestTestHTTP, CancelTest5) {
EXPECT_EQ(0, d.bytes_received());
EXPECT_FALSE(d.received_data_before_response());
}
-
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, PostTest) {
@@ -554,9 +612,6 @@ TEST_F(URLRequestTestHTTP, PostTest) {
EXPECT_EQ(d.data_received().compare(uploadBytes), 0);
}
delete[] uploadBytes;
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, PostEmptyTest) {
@@ -577,9 +632,6 @@ TEST_F(URLRequestTestHTTP, PostEmptyTest) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_TRUE(d.data_received().empty());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, PostFileTest) {
@@ -627,9 +679,6 @@ TEST_F(URLRequestTestHTTP, PostFileTest) {
ASSERT_EQ(size, d.bytes_received());
EXPECT_EQ(0, memcmp(d.data_received().c_str(), buf.get(), size));
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, AboutBlankTest) {
@@ -646,9 +695,6 @@ TEST_F(URLRequestTest, AboutBlankTest) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_EQ(d.bytes_received(), 0);
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, FileTest) {
@@ -673,9 +719,6 @@ TEST_F(URLRequestTest, FileTest) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_EQ(d.bytes_received(), static_cast<int>(file_size));
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, FileTestFullSpecifiedRange) {
@@ -717,9 +760,6 @@ TEST_F(URLRequestTest, FileTestFullSpecifiedRange) {
}
EXPECT_TRUE(file_util::Delete(temp_path, false));
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, FileTestHalfSpecifiedRange) {
@@ -760,9 +800,6 @@ TEST_F(URLRequestTest, FileTestHalfSpecifiedRange) {
}
EXPECT_TRUE(file_util::Delete(temp_path, false));
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, FileTestMultipleRanges) {
@@ -791,9 +828,6 @@ TEST_F(URLRequestTest, FileTestMultipleRanges) {
}
EXPECT_TRUE(file_util::Delete(temp_path, false));
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTest, InvalidUrlTest) {
@@ -807,9 +841,6 @@ TEST_F(URLRequestTest, InvalidUrlTest) {
MessageLoop::current()->Run();
EXPECT_TRUE(d.request_failed());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
// This test is disabled because it fails on some computers due to proxies
@@ -825,9 +856,6 @@ TEST_F(URLRequestTest, DISABLED_DnsFailureTest) {
MessageLoop::current()->Run();
EXPECT_TRUE(d.request_failed());
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
TEST_F(URLRequestTestHTTP, ResponseHeadersTest) {
@@ -927,11 +955,11 @@ TEST_F(URLRequestTest, ResolveShortcutTest) {
CoInitialize(NULL);
// Temporarily create a shortcut for test
result = CoCreateInstance(CLSID_ShellLink, NULL,
- CLSCTX_INPROC_SERVER, IID_IShellLink,
- reinterpret_cast<LPVOID*>(&shell));
+ CLSCTX_INPROC_SERVER, IID_IShellLink,
+ reinterpret_cast<LPVOID*>(&shell));
ASSERT_TRUE(SUCCEEDED(result));
result = shell->QueryInterface(IID_IPersistFile,
- reinterpret_cast<LPVOID*>(&persist));
+ reinterpret_cast<LPVOID*>(&persist));
ASSERT_TRUE(SUCCEEDED(result));
result = shell->SetPath(app_path.value().c_str());
EXPECT_TRUE(SUCCEEDED(result));
@@ -976,10 +1004,6 @@ TEST_F(URLRequestTest, ResolveShortcutTest) {
// Clean the shortcut
DeleteFile(lnk_path.c_str());
CoUninitialize();
-
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
}
#endif // defined(OS_WIN)
@@ -1021,9 +1045,6 @@ TEST_F(URLRequestTest, FileDirCancelTest) {
MessageLoop::current()->Run();
}
-#ifndef NDEBUG
- DCHECK_EQ(url_request_metrics.object_count, 0);
-#endif
// Take out mock resource provider.
net::NetModule::SetResourceProvider(NULL);
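A final subtlety from the TrackingGraveyardBounded test above: the kMaxURLLen + 1 expectation holds because re-parsing a truncated spec through GURL canonicalizes it again. A small sketch of that caveat, mirroring what InstanceTracker::ExtractInfo does inline (TruncateSpec and max_len are illustrative names):

#include <cstddef>
#include <string>

#include "googleurl/src/gurl.h"

// Truncating a spec and re-parsing it can change its length once more:
// canonicalization appends a trailing '/' to a bare host, so a spec cut
// to max_len characters can come back max_len + 1 characters long.
GURL TruncateSpec(const GURL& url, size_t max_len) {
  if (url.spec().size() <= max_len)
    return url;
  return GURL(url.spec().substr(0, max_len));
}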