author     ccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-07 12:14:03 +0000
committer  ccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-11-07 12:14:03 +0000
commit     26d01364394b7f3e1cc1a48dee00761ad95a4bf8 (patch)
tree       012622bbac5fd70cddb51a9f5be4114efec45345
parent     ba3d09a1464372826de29fea84f8d7c1605ebd11 (diff)
Track the total amount of managed memory allocated by all clients.
Remove tracking of the historical maximum, since it wasn't used. Also move the
ifdef-heavy const functions out of the header and into the source file.

BUG=134750
Review URL: https://chromiumcodereview.appspot.com/11366082

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@166393 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  content/common/gpu/gpu_memory_manager.cc           103
-rw-r--r--  content/common/gpu/gpu_memory_manager.h             61
-rw-r--r--  content/common/gpu/gpu_memory_manager_unittest.cc   96
3 files changed, 192 insertions, 68 deletions
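
Before the diff, a brief sketch of the bookkeeping pattern this patch introduces. The TrackValueChanged() helper below mirrors the one added to gpu_memory_manager.cc; the surrounding Totals struct, SetVisible() function, and main() are illustrative assumptions rather than Chromium code, and only show how a client's managed bytes are folded into a "visible" or "backgrounded" running total and moved between the two when visibility flips.

// Sketch only. TrackValueChanged() matches the helper added in this patch;
// Totals, SetVisible(), and main() are hypothetical illustrations.
#include <cassert>
#include <cstddef>
#include <cstdio>

using std::size_t;

// Apply the delta (new_size - old_size) to a running total. The assert guards
// against underflow when the tracked allocation shrinks.
void TrackValueChanged(size_t old_size, size_t new_size, size_t* total_size) {
  assert(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);
}

struct Totals {
  size_t visible = 0;
  size_t backgrounded = 0;
};

// A visibility flip moves a client's bytes between buckets: subtract the full
// amount from the old bucket, then add it to the new one.
void SetVisible(Totals* totals, size_t client_bytes, bool now_visible) {
  TrackValueChanged(client_bytes, 0,
                    now_visible ? &totals->backgrounded : &totals->visible);
  TrackValueChanged(0, client_bytes,
                    now_visible ? &totals->visible : &totals->backgrounded);
}

int main() {
  Totals totals;
  TrackValueChanged(0, 5, &totals.visible);       // a visible client allocates 5
  TrackValueChanged(0, 7, &totals.backgrounded);  // a backgrounded client allocates 7
  SetVisible(&totals, 5, false);                  // the visible client is backgrounded
  std::printf("visible=%zu backgrounded=%zu\n",   // prints: visible=0 backgrounded=12
              totals.visible, totals.backgrounded);
  return 0;
}
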
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index 2c0ed97..719efa1 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -24,6 +24,11 @@ namespace {
const int kDelayedScheduleManageTimeoutMs = 67;
+void TrackValueChanged(size_t old_size, size_t new_size, size_t* total_size) {
+ DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
+ *total_size += (new_size - old_size);
+}
+
}
void GpuMemoryManager::AssignMemoryAllocations(
@@ -44,7 +49,8 @@ GpuMemoryManager::GpuMemoryManager(
bytes_available_gpu_memory_(0),
bytes_available_gpu_memory_overridden_(false),
bytes_allocated_current_(0),
- bytes_allocated_historical_max_(0),
+ bytes_allocated_managed_visible_(0),
+ bytes_allocated_managed_backgrounded_(0),
window_count_has_been_received_(false),
window_count_(0),
disable_schedule_manage_(false)
@@ -63,6 +69,52 @@ GpuMemoryManager::GpuMemoryManager(
GpuMemoryManager::~GpuMemoryManager() {
DCHECK(tracking_groups_.empty());
DCHECK(clients_.empty());
+ DCHECK(!bytes_allocated_current_);
+ DCHECK(!bytes_allocated_managed_visible_);
+ DCHECK(!bytes_allocated_managed_backgrounded_);
+}
+
+size_t GpuMemoryManager::GetAvailableGpuMemory() const {
+ return bytes_available_gpu_memory_;
+}
+
+size_t GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
+#if defined(OS_ANDROID)
+ return 32 * 1024 * 1024;
+#elif defined(OS_CHROMEOS)
+ return 1024 * 1024 * 1024;
+#else
+ return 256 * 1024 * 1024;
+#endif
+}
+
+size_t GpuMemoryManager::GetMaximumTotalGpuMemory() const {
+#if defined(OS_ANDROID)
+ return 256 * 1024 * 1024;
+#else
+ return 1024 * 1024 * 1024;
+#endif
+}
+
+size_t GpuMemoryManager::GetMaximumTabAllocation() const {
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
+ return bytes_available_gpu_memory_;
+#else
+ // This is to avoid allowing a single page to use a full 256MB of memory
+ // (the current total limit). Long-scroll pages will hit this limit,
+ // resulting in instability on some platforms (e.g., issue 141377).
+ return bytes_available_gpu_memory_ / 2;
+#endif
+}
+
+size_t GpuMemoryManager::GetMinimumTabAllocation() const {
+#if defined(OS_ANDROID)
+ return 32 * 1024 * 1024;
+#elif defined(OS_CHROMEOS)
+ return 64 * 1024 * 1024;
+#else
+ return 64 * 1024 * 1024;
+#endif
}
size_t GpuMemoryManager::CalcAvailableFromViewportArea(int viewport_area) {
@@ -186,17 +238,7 @@ void GpuMemoryManager::ScheduleManage(bool immediate) {
void GpuMemoryManager::TrackMemoryAllocatedChange(size_t old_size,
size_t new_size) {
- if (new_size < old_size) {
- size_t delta = old_size - new_size;
- DCHECK(bytes_allocated_current_ >= delta);
- bytes_allocated_current_ -= delta;
- } else {
- size_t delta = new_size - old_size;
- bytes_allocated_current_ += delta;
- if (bytes_allocated_current_ > bytes_allocated_historical_max_) {
- bytes_allocated_historical_max_ = bytes_allocated_current_;
- }
- }
+ TrackValueChanged(old_size, new_size, &bytes_allocated_current_);
if (new_size != old_size) {
TRACE_COUNTER1("gpu",
"GpuMemoryUsage",
@@ -210,11 +252,13 @@ void GpuMemoryManager::AddClient(GpuMemoryManagerClient* client,
base::TimeTicks last_used_time) {
if (clients_.count(client))
return;
- std::pair<GpuMemoryManagerClient*, ClientState*> entry(
- client,
- new ClientState(client, has_surface, visible, last_used_time));
- clients_.insert(entry);
-
+ ClientState* client_state =
+ new ClientState(client, has_surface, visible, last_used_time);
+ TrackValueChanged(0, client_state->managed_memory_stats.bytes_allocated,
+ client_state->visible ?
+ &bytes_allocated_managed_visible_ :
+ &bytes_allocated_managed_backgrounded_);
+ clients_.insert(std::make_pair(client, client_state));
ScheduleManage(true);
}
@@ -222,9 +266,13 @@ void GpuMemoryManager::RemoveClient(GpuMemoryManagerClient* client) {
ClientMap::iterator it = clients_.find(client);
if (it == clients_.end())
return;
- delete it->second;
+ ClientState* client_state = it->second;
+ TrackValueChanged(client_state->managed_memory_stats.bytes_allocated, 0,
+ client_state->visible ?
+ &bytes_allocated_managed_visible_ :
+ &bytes_allocated_managed_backgrounded_);
+ delete client_state;
clients_.erase(it);
-
ScheduleManage(false);
}
@@ -235,8 +283,17 @@ void GpuMemoryManager::SetClientVisible(GpuMemoryManagerClient* client,
if (it == clients_.end())
return;
ClientState* client_state = it->second;
-
+ if (client_state->visible == visible)
+ return;
client_state->visible = visible;
+ TrackValueChanged(client_state->managed_memory_stats.bytes_allocated, 0,
+ client_state->visible ?
+ &bytes_allocated_managed_backgrounded_ :
+ &bytes_allocated_managed_visible_);
+ TrackValueChanged(0, client_state->managed_memory_stats.bytes_allocated,
+ client_state->visible ?
+ &bytes_allocated_managed_visible_ :
+ &bytes_allocated_managed_backgrounded_);
ScheduleManage(visible);
}
@@ -248,7 +305,11 @@ void GpuMemoryManager::SetClientManagedMemoryStats(
if (it == clients_.end())
return;
ClientState* client_state = it->second;
-
+ TrackValueChanged(client_state->managed_memory_stats.bytes_allocated,
+ stats.bytes_allocated,
+ client_state->visible ?
+ &bytes_allocated_managed_visible_ :
+ &bytes_allocated_managed_backgrounded_);
client_state->managed_memory_stats = stats;
}
diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h
index 57bce4e..377177a 100644
--- a/content/common/gpu/gpu_memory_manager.h
+++ b/content/common/gpu/gpu_memory_manager.h
@@ -101,6 +101,8 @@ class CONTENT_EXPORT GpuMemoryManager :
GpuMemoryAllocationCompareTests);
FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
StubMemoryStatsForLastManageTests);
+ FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
+ TestManagedUsageTracking);
struct ClientState {
ClientState(GpuMemoryManagerClient* client,
@@ -141,60 +143,24 @@ class CONTENT_EXPORT GpuMemoryManager :
// on what the stubs' contexts report.
void UpdateAvailableGpuMemory(const ClientStateVector& clients);
- // The amount of video memory which is available for allocation
- size_t GetAvailableGpuMemory() const {
- return bytes_available_gpu_memory_;
- }
+ // The amount of video memory which is available for allocation.
+ size_t GetAvailableGpuMemory() const;
// Default per-OS value for the amount of available GPU memory, used
// if we can't query the driver for an exact value.
- size_t GetDefaultAvailableGpuMemory() const {
-#if defined(OS_ANDROID)
- return 32 * 1024 * 1024;
-#elif defined(OS_CHROMEOS)
- return 1024 * 1024 * 1024;
-#else
- return 256 * 1024 * 1024;
-#endif
- }
+ size_t GetDefaultAvailableGpuMemory() const;
+
+ // Maximum cap on total GPU memory, no matter how much the GPU reports.
+ size_t GetMaximumTotalGpuMemory() const;
+
+ // The maximum and minimum amounts of memory that a tab may be assigned.
+ size_t GetMaximumTabAllocation() const;
+ size_t GetMinimumTabAllocation() const;
// Get a reasonable memory limit from a viewport's surface area.
static size_t CalcAvailableFromViewportArea(int viewport_area);
static size_t CalcAvailableFromGpuTotal(size_t total_gpu_memory);
- // Maximum cap on total GPU memory, no matter how much
- // the GPU reports to have.
- static size_t GetMaximumTotalGpuMemory() {
-#if defined(OS_ANDROID)
- return 256 * 1024 * 1024;
-#else
- return 1024 * 1024 * 1024;
-#endif
- }
-
- // The maximum amount of memory that a tab may be assigned
- size_t GetMaximumTabAllocation() const {
-#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
- return bytes_available_gpu_memory_;
-#else
- // This is to avoid allowing a single page on to use a full 256MB of memory
- // (the current total limit). Long-scroll pages will hit this limit,
- // resulting in instability on some platforms (e.g, issue 141377).
- return bytes_available_gpu_memory_ / 2;
-#endif
- }
-
- // The minimum non-zero amount of memory that a tab may be assigned
- static size_t GetMinimumTabAllocation() {
-#if defined(OS_ANDROID)
- return 32 * 1024 * 1024;
-#elif defined(OS_CHROMEOS)
- return 64 * 1024 * 1024;
-#else
- return 64 * 1024 * 1024;
-#endif
- }
-
// Interfaces for testing
void TestingSetClientVisible(GpuMemoryManagerClient* client, bool visible);
void TestingSetClientLastUsedTime(GpuMemoryManagerClient* client,
@@ -223,7 +189,8 @@ class CONTENT_EXPORT GpuMemoryManager :
// The current total memory usage, and historical maximum memory usage
size_t bytes_allocated_current_;
- size_t bytes_allocated_historical_max_;
+ size_t bytes_allocated_managed_visible_;
+ size_t bytes_allocated_managed_backgrounded_;
// The number of browser windows that exist. If we ever receive a
// GpuMsg_SetVideoMemoryWindowCount, then we use this to compute memory
diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc
index a63a218..a26dd3d 100644
--- a/content/common/gpu/gpu_memory_manager_unittest.cc
+++ b/content/common/gpu/gpu_memory_manager_unittest.cc
@@ -662,4 +662,100 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_GT(stub3allocation4, stub3allocation3);
}
+// Test GpuMemoryManager's managed memory tracking
+TEST_F(GpuMemoryManagerTest, TestManagedUsageTracking) {
+ FakeClient stub1(memmgr_, GenerateUniqueSurfaceId(), true, older_),
+ stub2(memmgr_, GenerateUniqueSurfaceId(), false, older_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Set memory allocations and verify the results are reflected.
+ memmgr_.SetClientManagedMemoryStats(
+ &stub1, GpuManagedMemoryStats(0, 0, 5, false));
+ memmgr_.SetClientManagedMemoryStats(
+ &stub2, GpuManagedMemoryStats(0, 0, 7, false));
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Redundantly add a client and make sure nothing changes
+ memmgr_.AddClient(&stub1, true, true, older_);
+ memmgr_.AddClient(&stub2, true, true, older_);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Remove a visible client
+ memmgr_.RemoveClient(&stub1);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.SetClientManagedMemoryStats(
+ &stub1, GpuManagedMemoryStats(0, 0, 99, false));
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.AddClient(&stub1, true, true, older_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.SetClientManagedMemoryStats(
+ &stub1, GpuManagedMemoryStats(0, 0, 5, false));
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Remove a backgrounded client
+ memmgr_.RemoveClient(&stub2);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.SetClientManagedMemoryStats(
+ &stub2, GpuManagedMemoryStats(0, 0, 99, false));
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.AddClient(&stub2, true, false, older_);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ memmgr_.SetClientManagedMemoryStats(
+ &stub2, GpuManagedMemoryStats(0, 0, 7, false));
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Create and then destroy some stubs, and verify their allocations go away.
+ {
+ FakeClient stub3(memmgr_, GenerateUniqueSurfaceId(), true, older_),
+ stub4(memmgr_, GenerateUniqueSurfaceId(), false, older_);
+ memmgr_.SetClientManagedMemoryStats(
+ &stub3, GpuManagedMemoryStats(0, 0, 1, false));
+ memmgr_.SetClientManagedMemoryStats(
+ &stub4, GpuManagedMemoryStats(0, 0, 2, false));
+ EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(9ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ }
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Do no-op changes to stubs' visibility and make sure nothing changes.
+ memmgr_.SetClientVisible(&stub1, true);
+ memmgr_.SetClientVisible(&stub2, false);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Change visibility state.
+ memmgr_.SetClientVisible(&stub1, false);
+ memmgr_.SetClientVisible(&stub2, true);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Increase allocation amounts.
+ memmgr_.SetClientManagedMemoryStats(
+ &stub1, GpuManagedMemoryStats(0, 0, 6, false));
+ memmgr_.SetClientManagedMemoryStats(
+ &stub2, GpuManagedMemoryStats(0, 0, 8, false));
+ EXPECT_EQ(8ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_backgrounded_);
+
+ // Decrease allocation amounts.
+ memmgr_.SetClientManagedMemoryStats(
+ &stub1, GpuManagedMemoryStats(0, 0, 4, false));
+ memmgr_.SetClientManagedMemoryStats(
+ &stub2, GpuManagedMemoryStats(0, 0, 6, false));
+ EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_visible_);
+ EXPECT_EQ(4ul, memmgr_.bytes_allocated_managed_backgrounded_);
+}
+
} // namespace content