summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--content/common/gpu/gpu_memory_manager.cc162
-rw-r--r--content/common/gpu/gpu_memory_manager.h20
-rw-r--r--content/common/gpu/gpu_memory_manager_client.cc8
-rw-r--r--content/common/gpu/gpu_memory_manager_client.h13
-rw-r--r--content/common/gpu/gpu_memory_manager_unittest.cc128
5 files changed, 166 insertions, 165 deletions
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index e8f9cd7c..0c5b939 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -57,10 +57,10 @@ GpuMemoryManager::GpuMemoryManager(
bytes_available_gpu_memory_overridden_(false),
bytes_minimum_per_client_(0),
bytes_minimum_per_client_overridden_(false),
- bytes_backgrounded_available_gpu_memory_(0),
+ bytes_nonvisible_available_gpu_memory_(0),
bytes_allocated_managed_current_(0),
bytes_allocated_managed_visible_(0),
- bytes_allocated_managed_backgrounded_(0),
+ bytes_allocated_managed_nonvisible_(0),
bytes_allocated_unmanaged_current_(0),
bytes_allocated_historical_max_(0),
bytes_allocated_unmanaged_high_(0),
@@ -79,7 +79,7 @@ GpuMemoryManager::GpuMemoryManager(
bytes_available_gpu_memory_overridden_ = true;
} else
bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
- UpdateBackgroundedAvailableGpuMemory();
+ UpdateNonvisibleAvailableGpuMemory();
}
GpuMemoryManager::~GpuMemoryManager() {
@@ -90,7 +90,7 @@ GpuMemoryManager::~GpuMemoryManager() {
DCHECK(!bytes_allocated_managed_current_);
DCHECK(!bytes_allocated_unmanaged_current_);
DCHECK(!bytes_allocated_managed_visible_);
- DCHECK(!bytes_allocated_managed_backgrounded_);
+ DCHECK(!bytes_allocated_managed_nonvisible_);
}
size_t GpuMemoryManager::GetAvailableGpuMemory() const {
@@ -101,9 +101,9 @@ size_t GpuMemoryManager::GetAvailableGpuMemory() const {
return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
}
-size_t GpuMemoryManager::GetCurrentBackgroundedAvailableGpuMemory() const {
+size_t GpuMemoryManager::GetCurrentNonvisibleAvailableGpuMemory() const {
if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) {
- return std::min(bytes_backgrounded_available_gpu_memory_,
+ return std::min(bytes_nonvisible_available_gpu_memory_,
GetAvailableGpuMemory() - bytes_allocated_managed_visible_);
}
return 0;
@@ -127,7 +127,7 @@ size_t GpuMemoryManager::GetMaximumTotalGpuMemory() const {
#endif
}
-size_t GpuMemoryManager::GetMaximumTabAllocation() const {
+size_t GpuMemoryManager::GetMaximumClientAllocation() const {
#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
return bytes_available_gpu_memory_;
#else
@@ -138,7 +138,7 @@ size_t GpuMemoryManager::GetMaximumTabAllocation() const {
#endif
}
-size_t GpuMemoryManager::GetMinimumTabAllocation() const {
+size_t GpuMemoryManager::GetMinimumClientAllocation() const {
if (bytes_minimum_per_client_overridden_)
return bytes_minimum_per_client_;
#if defined(OS_ANDROID)
@@ -247,13 +247,13 @@ void GpuMemoryManager::UpdateUnmanagedMemoryLimits() {
bytes_unmanaged_limit_step_);
}
-void GpuMemoryManager::UpdateBackgroundedAvailableGpuMemory() {
- // Be conservative and disable saving backgrounded tabs' textures on Android
+void GpuMemoryManager::UpdateNonvisibleAvailableGpuMemory() {
+ // Be conservative and disable saving nonvisible clients' textures on Android
// for the moment
#if defined(OS_ANDROID)
- bytes_backgrounded_available_gpu_memory_ = 0;
+ bytes_nonvisible_available_gpu_memory_ = 0;
#else
- bytes_backgrounded_available_gpu_memory_ = GetAvailableGpuMemory() / 4;
+ bytes_nonvisible_available_gpu_memory_ = GetAvailableGpuMemory() / 4;
#endif
}
@@ -336,7 +336,7 @@ GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
client_state->visible_ ?
&bytes_allocated_managed_visible_ :
- &bytes_allocated_managed_backgrounded_);
+ &bytes_allocated_managed_nonvisible_);
AddClientToList(client_state);
ScheduleManage(kScheduleManageNow);
return client_state;
@@ -348,7 +348,7 @@ void GpuMemoryManager::OnDestroyClientState(
TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
client_state->visible_ ?
&bytes_allocated_managed_visible_ :
- &bytes_allocated_managed_backgrounded_);
+ &bytes_allocated_managed_nonvisible_);
ScheduleManage(kScheduleManageLater);
}
@@ -364,12 +364,12 @@ void GpuMemoryManager::SetClientStateVisible(
TrackValueChanged(client_state->managed_memory_stats_.bytes_allocated, 0,
client_state->visible_ ?
- &bytes_allocated_managed_backgrounded_ :
+ &bytes_allocated_managed_nonvisible_ :
&bytes_allocated_managed_visible_);
TrackValueChanged(0, client_state->managed_memory_stats_.bytes_allocated,
client_state->visible_ ?
&bytes_allocated_managed_visible_ :
- &bytes_allocated_managed_backgrounded_);
+ &bytes_allocated_managed_nonvisible_);
ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
}
@@ -381,24 +381,24 @@ void GpuMemoryManager::SetClientStateManagedMemoryStats(
stats.bytes_allocated,
client_state->visible_ ?
&bytes_allocated_managed_visible_ :
- &bytes_allocated_managed_backgrounded_);
+ &bytes_allocated_managed_nonvisible_);
client_state->managed_memory_stats_ = stats;
if (use_nonuniform_memory_policy_) {
// If these statistics sit outside of the range that we used in our
- // computation of memory budgets then recompute the budgets.
+ // computation of memory allocations then recompute the allocations.
if (client_state->managed_memory_stats_.bytes_nice_to_have >
- client_state->bytes_nice_to_have_limit_high_) {
+ client_state->bytes_nicetohave_limit_high_) {
ScheduleManage(kScheduleManageNow);
} else if (client_state->managed_memory_stats_.bytes_nice_to_have <
- client_state->bytes_nice_to_have_limit_low_) {
+ client_state->bytes_nicetohave_limit_low_) {
ScheduleManage(kScheduleManageLater);
}
} else {
- // If this allocation pushed our usage of backgrounded tabs memory over the
- // limit, then schedule a drop of backgrounded memory.
- if (bytes_allocated_managed_backgrounded_ >
- GetCurrentBackgroundedAvailableGpuMemory())
+ // If this allocation pushed our usage of nonvisible clients' memory over
+ // the limit, then schedule a drop of nonvisible memory.
+ if (bytes_allocated_managed_nonvisible_ >
+ GetCurrentNonvisibleAvailableGpuMemory())
ScheduleManage(kScheduleManageLater);
}
}
@@ -489,9 +489,9 @@ void GpuMemoryManager::Manage() {
// Update the limit on unmanaged memory.
UpdateUnmanagedMemoryLimits();
- // Update the backgrounded available gpu memory because it depends on
+ // Update the nonvisible available gpu memory because it depends on
// the available GPU memory.
- UpdateBackgroundedAvailableGpuMemory();
+ UpdateNonvisibleAvailableGpuMemory();
// Determine which clients are "hibernated" (which determines the
// distribution of frontbuffers and memory among clients that don't have
@@ -518,7 +518,7 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
clients_nonvisible_mru_.begin(),
clients_nonvisible_mru_.end());
- // Compute budget when visible for all clients
+ // Compute allocation when visible for all clients
for (ClientStateList::const_iterator it = clients.begin();
it != clients.end();
++it) {
@@ -527,40 +527,40 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
// Give the client 4/3 of what it needs to draw everything that is in
// the "nice to have" bucket, so that it has some room to grow.
- client_state->bytes_budget_when_visible_ =
+ client_state->bytes_allocation_when_visible_ =
4 * stats->bytes_nice_to_have / 3;
// Re-assign memory limits to this client when its "nice to have" bucket
// grows or shrinks by 1/4.
- client_state->bytes_nice_to_have_limit_high_ =
+ client_state->bytes_nicetohave_limit_high_ =
5 * stats->bytes_nice_to_have / 4;
- client_state->bytes_nice_to_have_limit_low_ =
+ client_state->bytes_nicetohave_limit_low_ =
3 * stats->bytes_nice_to_have / 4;
// Clamp to the acceptable range.
- client_state->bytes_budget_when_visible_ = std::min(
- client_state->bytes_budget_when_visible_,
- GetMaximumTabAllocation());
- client_state->bytes_budget_when_visible_ = std::max(
- client_state->bytes_budget_when_visible_,
- GetMinimumTabAllocation());
+ client_state->bytes_allocation_when_visible_ = std::min(
+ client_state->bytes_allocation_when_visible_,
+ GetMaximumClientAllocation());
+ client_state->bytes_allocation_when_visible_ = std::max(
+ client_state->bytes_allocation_when_visible_,
+ GetMinimumClientAllocation());
// Compute how much space is used by visible clients.
if (client_state->visible_)
- bytes_allocated_visible += client_state->bytes_budget_when_visible_;
+ bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
}
// TODO(ccameron): If bytes_allocated_visible exceeds bytes_available_total,
// then cut down the amount of memory given out. This has to be done
- // carefully -- we don't want a single heavy tab to cause other light tabs
- // to not display correctly.
+ // carefully -- we don't want a single heavy client to cause other light
+ // clients to not display correctly.
- // Allow up to 1/4 of the memory that was available for visible tabs to
- // go to backgrounded tabs.
- size_t bytes_available_backgrounded = 0;
- size_t bytes_allocated_backgrounded = 0;
+ // Allow up to 1/4 of the memory that was available for visible clients to
+ // go to nonvisible clients.
+ size_t bytes_available_nonvisible = 0;
+ size_t bytes_allocated_nonvisible = 0;
if (bytes_available_total > bytes_allocated_visible) {
- bytes_available_backgrounded = std::min(
+ bytes_available_nonvisible = std::min(
bytes_available_total / 4,
bytes_available_total - bytes_allocated_visible);
}
@@ -571,34 +571,34 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_;
// Compute the amount of space we have for this renderer when it is
- // backgrounded.
- size_t bytes_available_backgrounded_adjusted = 0;
+ // nonvisible.
+ size_t bytes_available_nonvisible_adjusted = 0;
if (client_state->visible_) {
- // If this is a visible tab, don't count this tab's budget while visible
- // against the backgrounded tabs' budget total.
- bytes_available_backgrounded_adjusted = std::min(
- bytes_available_backgrounded +
- client_state->bytes_budget_when_visible_ / 4,
+ // If this is a visible client, don't count this client's allocation
+ // while visible against the nonvisible clients' allocation total.
+ bytes_available_nonvisible_adjusted = std::min(
+ bytes_available_nonvisible +
+ client_state->bytes_allocation_when_visible_ / 4,
bytes_available_total / 4);
- } else if (bytes_available_backgrounded > bytes_allocated_backgrounded) {
- // If this is a backgrounded tab, take into account all more recently
- // used backgrounded tabs.
- bytes_available_backgrounded_adjusted =
- bytes_available_backgrounded - bytes_allocated_backgrounded;
+ } else if (bytes_available_nonvisible > bytes_allocated_nonvisible) {
+ // If this is a nonvisible client, take into account all more recently
+ // used nonvisible clients.
+ bytes_available_nonvisible_adjusted =
+ bytes_available_nonvisible - bytes_allocated_nonvisible;
}
- // Give a budget of 9/8ths of the required memory when backgrounded, if it
+    // Give an allocation of 9/8ths of the required memory when nonvisible, if it
// fits within the limit we just calculated.
- client_state->bytes_budget_when_backgrounded_ =
+ client_state->bytes_allocation_when_nonvisible_ =
9 * stats->bytes_required / 8;
- if (client_state->bytes_budget_when_backgrounded_ >
- bytes_available_backgrounded_adjusted)
- client_state->bytes_budget_when_backgrounded_ = 0;
+ if (client_state->bytes_allocation_when_nonvisible_ >
+ bytes_available_nonvisible_adjusted)
+ client_state->bytes_allocation_when_nonvisible_ = 0;
- // Update the amount of memory given out to backgrounded tabs.
+ // Update the amount of memory given out to nonvisible clients.
if (!client_state->visible_)
- bytes_allocated_backgrounded +=
- client_state->bytes_budget_when_backgrounded_;
+ bytes_allocated_nonvisible +=
+ client_state->bytes_allocation_when_nonvisible_;
}
// Assign budgets to clients.
@@ -612,12 +612,12 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
!client_state->hibernated_;
allocation.renderer_allocation.bytes_limit_when_visible =
- client_state->bytes_budget_when_visible_;
+ client_state->bytes_allocation_when_visible_;
allocation.renderer_allocation.priority_cutoff_when_visible =
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
allocation.renderer_allocation.bytes_limit_when_not_visible =
- client_state->bytes_budget_when_backgrounded_;
+ client_state->bytes_allocation_when_nonvisible_;
allocation.renderer_allocation.priority_cutoff_when_not_visible =
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
@@ -626,7 +626,7 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
}
void GpuMemoryManager::AssignSurfacesAllocationsUniform() {
- // Determine how much memory to assign to give to visible and backgrounded
+ // Determine how much memory to assign to give to visible and nonvisible
// clients.
size_t bytes_limit_when_visible = GetVisibleClientAllocation();
@@ -653,12 +653,12 @@ void GpuMemoryManager::AssignSurfacesAllocationsUniform() {
allocation.renderer_allocation.priority_cutoff_when_visible =
priority_cutoff_when_visible;
- // Allow this client to keep its textures when backgrounded if they
+ // Allow this client to keep its textures when nonvisible if they
// aren't so expensive that they won't fit.
if (client_state->managed_memory_stats_.bytes_required <=
- bytes_backgrounded_available_gpu_memory_) {
+ bytes_nonvisible_available_gpu_memory_) {
allocation.renderer_allocation.bytes_limit_when_not_visible =
- GetCurrentBackgroundedAvailableGpuMemory();
+ GetCurrentNonvisibleAvailableGpuMemory();
allocation.renderer_allocation.priority_cutoff_when_not_visible =
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
} else {
@@ -670,8 +670,8 @@ void GpuMemoryManager::AssignSurfacesAllocationsUniform() {
client_state->client_->SetMemoryAllocation(allocation);
}
- // Assign memory allocations to backgrounded clients.
- size_t bytes_allocated_backgrounded = 0;
+ // Assign memory allocations to nonvisible clients.
+ size_t bytes_allocated_nonvisible = 0;
for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
it != clients_nonvisible_mru_.end();
++it) {
@@ -686,12 +686,12 @@ void GpuMemoryManager::AssignSurfacesAllocationsUniform() {
priority_cutoff_when_visible;
if (client_state->managed_memory_stats_.bytes_required +
- bytes_allocated_backgrounded <=
- GetCurrentBackgroundedAvailableGpuMemory()) {
- bytes_allocated_backgrounded +=
+ bytes_allocated_nonvisible <=
+ GetCurrentNonvisibleAvailableGpuMemory()) {
+ bytes_allocated_nonvisible +=
client_state->managed_memory_stats_.bytes_required;
allocation.renderer_allocation.bytes_limit_when_not_visible =
- GetCurrentBackgroundedAvailableGpuMemory();
+ GetCurrentNonvisibleAvailableGpuMemory();
allocation.renderer_allocation.priority_cutoff_when_not_visible =
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired;
} else {
@@ -713,7 +713,7 @@ void GpuMemoryManager::AssignNonSurfacesAllocations() {
if (!client_state->hibernated_) {
allocation.renderer_allocation.bytes_limit_when_visible =
- GetMinimumTabAllocation();
+ GetMinimumClientAllocation();
allocation.renderer_allocation.priority_cutoff_when_visible =
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
}
@@ -780,14 +780,14 @@ size_t GpuMemoryManager::GetVisibleClientAllocation() const {
// after giving out the minimum to those that need it.
size_t num_clients_need_mem = clients_with_surface_visible_count +
clients_without_surface_not_hibernated_count;
- size_t base_allocation_size = GetMinimumTabAllocation() *
+ size_t base_allocation_size = GetMinimumClientAllocation() *
num_clients_need_mem;
size_t bonus_allocation = 0;
if (base_allocation_size < GetAvailableGpuMemory() &&
clients_with_surface_visible_count)
bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) /
clients_with_surface_visible_count;
- size_t clients_allocation_when_visible = GetMinimumTabAllocation() +
+ size_t clients_allocation_when_visible = GetMinimumClientAllocation() +
bonus_allocation;
// If we have received a window count message, then override the client-based
@@ -799,8 +799,8 @@ size_t GpuMemoryManager::GetVisibleClientAllocation() const {
}
// Limit the memory per client to its maximum allowed level.
- if (clients_allocation_when_visible >= GetMaximumTabAllocation())
- clients_allocation_when_visible = GetMaximumTabAllocation();
+ if (clients_allocation_when_visible >= GetMaximumClientAllocation())
+ clients_allocation_when_visible = GetMaximumClientAllocation();
return clients_allocation_when_visible;
}
diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h
index ba1a527..b434801 100644
--- a/content/common/gpu/gpu_memory_manager.h
+++ b/content/common/gpu/gpu_memory_manager.h
@@ -103,7 +103,7 @@ class CONTENT_EXPORT GpuMemoryManager :
void Manage();
void SetClientsHibernatedState() const;
size_t GetVisibleClientAllocation() const;
- size_t GetCurrentBackgroundedAvailableGpuMemory() const;
+ size_t GetCurrentNonvisibleAvailableGpuMemory() const;
void AssignSurfacesAllocationsNonuniform();
void AssignSurfacesAllocationsUniform();
void AssignNonSurfacesAllocations();
@@ -112,7 +112,7 @@ class CONTENT_EXPORT GpuMemoryManager :
// on what the stubs' contexts report.
void UpdateAvailableGpuMemory();
void UpdateUnmanagedMemoryLimits();
- void UpdateBackgroundedAvailableGpuMemory();
+ void UpdateNonvisibleAvailableGpuMemory();
// The amount of video memory which is available for allocation.
size_t GetAvailableGpuMemory() const;
@@ -125,8 +125,8 @@ class CONTENT_EXPORT GpuMemoryManager :
size_t GetMaximumTotalGpuMemory() const;
// The maximum and minimum amount of memory that a tab may be assigned.
- size_t GetMaximumTabAllocation() const;
- size_t GetMinimumTabAllocation() const;
+ size_t GetMaximumClientAllocation() const;
+ size_t GetMinimumClientAllocation() const;
// Get a reasonable memory limit from a viewport's surface area.
static size_t CalcAvailableFromViewportArea(int viewport_area);
@@ -179,8 +179,8 @@ class CONTENT_EXPORT GpuMemoryManager :
bytes_unmanaged_limit_step_ = bytes;
}
- void TestingSetBackgroundedAvailableGpuMemory(size_t bytes) {
- bytes_backgrounded_available_gpu_memory_ = bytes;
+ void TestingSetNonvisibleAvailableGpuMemory(size_t bytes) {
+ bytes_nonvisible_available_gpu_memory_ = bytes;
}
GpuChannelManager* channel_manager_;
@@ -214,13 +214,13 @@ class CONTENT_EXPORT GpuMemoryManager :
bool bytes_minimum_per_client_overridden_;
// The maximum amount of memory that can be allocated for GPU resources
- // in backgrounded renderers.
- size_t bytes_backgrounded_available_gpu_memory_;
+ // in nonvisible renderers.
+ size_t bytes_nonvisible_available_gpu_memory_;
// The current total memory usage, and historical maximum memory usage
size_t bytes_allocated_managed_current_;
size_t bytes_allocated_managed_visible_;
- size_t bytes_allocated_managed_backgrounded_;
+ size_t bytes_allocated_managed_nonvisible_;
size_t bytes_allocated_unmanaged_current_;
size_t bytes_allocated_historical_max_;
@@ -234,7 +234,7 @@ class CONTENT_EXPORT GpuMemoryManager :
// The number of browser windows that exist. If we ever receive a
// GpuMsg_SetVideoMemoryWindowCount, then we use this to compute memory
- // budgets, instead of doing more complicated stub-based calculations.
+ // allocations, instead of doing more complicated stub-based calculations.
bool window_count_has_been_received_;
uint32 window_count_;
diff --git a/content/common/gpu/gpu_memory_manager_client.cc b/content/common/gpu/gpu_memory_manager_client.cc
index 7c867ed..00d31f9 100644
--- a/content/common/gpu/gpu_memory_manager_client.cc
+++ b/content/common/gpu/gpu_memory_manager_client.cc
@@ -22,10 +22,10 @@ GpuMemoryManagerClientState::GpuMemoryManagerClientState(
has_surface_(has_surface),
visible_(visible),
list_iterator_valid_(false),
- bytes_nice_to_have_limit_low_(0),
- bytes_nice_to_have_limit_high_(0),
- bytes_budget_when_visible_(0),
- bytes_budget_when_backgrounded_(0),
+ bytes_nicetohave_limit_low_(0),
+ bytes_nicetohave_limit_high_(0),
+ bytes_allocation_when_visible_(0),
+ bytes_allocation_when_nonvisible_(0),
hibernated_(false) {
}
diff --git a/content/common/gpu/gpu_memory_manager_client.h b/content/common/gpu/gpu_memory_manager_client.h
index 287c0fe..87fba6e 100644
--- a/content/common/gpu/gpu_memory_manager_client.h
+++ b/content/common/gpu/gpu_memory_manager_client.h
@@ -84,14 +84,15 @@ class CONTENT_EXPORT GpuMemoryManagerClientState {
// Statistics about memory usage.
GpuManagedMemoryStats managed_memory_stats_;
- // When managed_memory_stats_.bytes_nice_to_have leaves the range
+ // When managed_memory_stats_.bytes_nicetohave leaves the range
// [low_, high_], then re-adjust memory limits.
- size_t bytes_nice_to_have_limit_low_;
- size_t bytes_nice_to_have_limit_high_;
+ size_t bytes_nicetohave_limit_low_;
+ size_t bytes_nicetohave_limit_high_;
- // The budget for this client.
- size_t bytes_budget_when_visible_;
- size_t bytes_budget_when_backgrounded_;
+ // The allocation for this client, used transiently during memory policy
+ // calculation.
+ size_t bytes_allocation_when_visible_;
+ size_t bytes_allocation_when_nonvisible_;
// Set to disable allocating a frontbuffer or to disable allocations
// for clients that don't have surfaces.
diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc
index 58414f4..6a8686c 100644
--- a/content/common/gpu/gpu_memory_manager_unittest.cc
+++ b/content/common/gpu/gpu_memory_manager_unittest.cc
@@ -176,35 +176,35 @@ class GpuMemoryManagerTest : public testing::Test {
return alloc.browser_allocation.suggest_have_frontbuffer &&
!alloc.renderer_allocation.have_backbuffer_when_not_visible &&
alloc.renderer_allocation.bytes_limit_when_visible >=
- GetMinimumTabAllocation();
+ GetMinimumClientAllocation();
}
bool IsAllocationBackgroundForSurfaceYes(
const GpuMemoryAllocation& alloc) {
return alloc.browser_allocation.suggest_have_frontbuffer &&
!alloc.renderer_allocation.have_backbuffer_when_not_visible &&
alloc.renderer_allocation.bytes_limit_when_not_visible <=
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory();
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory();
}
bool IsAllocationHibernatedForSurfaceYes(
const GpuMemoryAllocation& alloc) {
return !alloc.browser_allocation.suggest_have_frontbuffer &&
!alloc.renderer_allocation.have_backbuffer_when_not_visible &&
alloc.renderer_allocation.bytes_limit_when_not_visible <=
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory();
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory();
}
bool IsAllocationForegroundForSurfaceNo(
const GpuMemoryAllocation& alloc) {
return !alloc.browser_allocation.suggest_have_frontbuffer &&
!alloc.renderer_allocation.have_backbuffer_when_not_visible &&
alloc.renderer_allocation.bytes_limit_when_visible ==
- GetMinimumTabAllocation();
+ GetMinimumClientAllocation();
}
bool IsAllocationBackgroundForSurfaceNo(
const GpuMemoryAllocation& alloc) {
return !alloc.browser_allocation.suggest_have_frontbuffer &&
!alloc.renderer_allocation.have_backbuffer_when_not_visible &&
alloc.renderer_allocation.bytes_limit_when_visible ==
- GetMinimumTabAllocation();
+ GetMinimumClientAllocation();
}
bool IsAllocationHibernatedForSurfaceNo(
const GpuMemoryAllocation& alloc) {
@@ -236,12 +236,12 @@ class GpuMemoryManagerTest : public testing::Test {
return memmgr_.GetAvailableGpuMemory();
}
- size_t GetMaximumTabAllocation() {
- return memmgr_.GetMaximumTabAllocation();
+ size_t GetMaximumClientAllocation() {
+ return memmgr_.GetMaximumClientAllocation();
}
- size_t GetMinimumTabAllocation() {
- return memmgr_.GetMinimumTabAllocation();
+ size_t GetMinimumClientAllocation() {
+ return memmgr_.GetMinimumClientAllocation();
}
GpuMemoryManager memmgr_;
@@ -462,11 +462,11 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
// Test GpuMemoryAllocation memory allocation bonuses:
// When the number of visible tabs is small, each tab should get a
// gpu_resource_size_in_bytes allocation value that is greater than
-// GetMinimumTabAllocation(), and when the number of tabs is large, each should
-// get exactly GetMinimumTabAllocation() and not less.
+// GetMinimumClientAllocation(), and when the number of tabs is large,
+// each should get exactly GetMinimumClientAllocation() and not less.
TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
size_t max_stubs_before_no_bonus =
- GetAvailableGpuMemory() / (GetMinimumTabAllocation() + 1);
+ GetAvailableGpuMemory() / (GetMinimumClientAllocation() + 1);
std::vector<FakeClient*> stubs;
for (size_t i = 0; i < max_stubs_before_no_bonus; ++i) {
@@ -479,7 +479,7 @@ TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stubs[i]->allocation_));
EXPECT_GT(
stubs[i]->allocation_.renderer_allocation.bytes_limit_when_visible,
- static_cast<size_t>(GetMinimumTabAllocation()));
+ static_cast<size_t>(GetMinimumClientAllocation()));
}
FakeClient extra_stub(&memmgr_, GenerateUniqueSurfaceId(), true);
@@ -489,7 +489,7 @@ TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stubs[i]->allocation_));
EXPECT_EQ(
stubs[i]->allocation_.renderer_allocation.bytes_limit_when_visible,
- GetMinimumTabAllocation());
+ GetMinimumClientAllocation());
}
for (size_t i = 0; i < max_stubs_before_no_bonus; ++i) {
@@ -602,7 +602,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_EQ(stats.size(), 2ul);
EXPECT_GT(stub1allocation2, 0ul);
EXPECT_GT(stub2allocation2, 0ul);
- if (stub1allocation2 != GetMaximumTabAllocation())
+ if (stub1allocation2 != GetMaximumClientAllocation())
EXPECT_LT(stub1allocation2, stub1allocation1);
FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true);
@@ -619,7 +619,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_GT(stub1allocation3, 0ul);
EXPECT_GT(stub2allocation3, 0ul);
EXPECT_GT(stub3allocation3, 0ul);
- if (stub1allocation3 != GetMaximumTabAllocation())
+ if (stub1allocation3 != GetMaximumClientAllocation())
EXPECT_LT(stub1allocation3, stub1allocation2);
stub1.SetVisible(false);
@@ -637,7 +637,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_GT(stub1allocation4, 0ul);
EXPECT_GE(stub2allocation4, 0ul);
EXPECT_GT(stub3allocation4, 0ul);
- if (stub3allocation3 != GetMaximumTabAllocation())
+ if (stub3allocation3 != GetMaximumClientAllocation())
EXPECT_GT(stub3allocation4, stub3allocation3);
}
@@ -646,39 +646,39 @@ TEST_F(GpuMemoryManagerTest, TestManagedUsageTracking) {
FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true),
stub2(&memmgr_, GenerateUniqueSurfaceId(), false);
EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Set memory allocations and verify the results are reflected.
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 5, false));
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 7, false));
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Remove a visible client
stub1.client_state_.reset();
EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
stub1.client_state_.reset(memmgr_.CreateClientState(&stub1, true, true));
EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 5, false));
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
- // Remove a backgrounded client
+ // Remove a nonvisible client
stub2.client_state_.reset();
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_nonvisible_);
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_nonvisible_);
stub2.client_state_.reset(memmgr_.CreateClientState(&stub2, true, false));
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(0ul, memmgr_.bytes_allocated_managed_nonvisible_);
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 7, false));
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Create and then destroy some stubs, and verify their allocations go away.
{
@@ -687,40 +687,40 @@ TEST_F(GpuMemoryManagerTest, TestManagedUsageTracking) {
stub3.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 1, false));
stub4.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 2, false));
EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(9ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(9ul, memmgr_.bytes_allocated_managed_nonvisible_);
}
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Do no-op changes to stubs' visibility and make sure nothing changes.
stub1.SetVisible(true);
stub2.SetVisible(false);
EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Change visibility state.
stub1.SetVisible(false);
stub2.SetVisible(true);
EXPECT_EQ(7ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(5ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Increase allocation amounts.
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 6, false));
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 8, false));
EXPECT_EQ(8ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_nonvisible_);
// Decrease allocation amounts.
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 4, false));
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(0, 0, 6, false));
EXPECT_EQ(6ul, memmgr_.bytes_allocated_managed_visible_);
- EXPECT_EQ(4ul, memmgr_.bytes_allocated_managed_backgrounded_);
+ EXPECT_EQ(4ul, memmgr_.bytes_allocated_managed_nonvisible_);
}
// Test GpuMemoryManager's background cutoff thresholds
TEST_F(GpuMemoryManagerTest, TestBackgroundCutoff) {
memmgr_.TestingSetAvailableGpuMemory(64);
- memmgr_.TestingSetBackgroundedAvailableGpuMemory(16);
+ memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
@@ -733,9 +733,9 @@ TEST_F(GpuMemoryManagerTest, TestBackgroundCutoff) {
// stub1 now fits, so it should have a full budget.
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(16, 24, 18, false));
Manage();
- EXPECT_EQ(memmgr_.bytes_backgrounded_available_gpu_memory_,
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_,
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory());
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub1.BytesWhenNotVisible());
// Background stub1.
@@ -747,11 +747,11 @@ TEST_F(GpuMemoryManagerTest, TestBackgroundCutoff) {
FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true);
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(16, 50, 48, false));
Manage();
- EXPECT_EQ(memmgr_.bytes_backgrounded_available_gpu_memory_,
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_,
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory());
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub1.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub2.BytesWhenNotVisible());
// Increase stub2 to force stub1 to be evicted.
@@ -759,32 +759,32 @@ TEST_F(GpuMemoryManagerTest, TestBackgroundCutoff) {
Manage();
EXPECT_EQ(0ul,
stub1.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub2.BytesWhenNotVisible());
}
// Test GpuMemoryManager's background MRU behavior
TEST_F(GpuMemoryManagerTest, TestBackgroundMru) {
memmgr_.TestingSetAvailableGpuMemory(64);
- memmgr_.TestingSetBackgroundedAvailableGpuMemory(16);
+ memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true);
FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true);
// When all are visible, they should all be allowed to have memory
- // should they become backgrounded.
+ // should they become nonvisible.
stub1.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false));
stub2.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false));
stub3.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false));
Manage();
- EXPECT_EQ(memmgr_.bytes_backgrounded_available_gpu_memory_,
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_,
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory());
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub1.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub2.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub3.BytesWhenNotVisible());
@@ -792,33 +792,33 @@ TEST_F(GpuMemoryManagerTest, TestBackgroundMru) {
stub2.SetVisible(false);
stub1.SetVisible(false);
Manage();
- EXPECT_EQ(memmgr_.bytes_backgrounded_available_gpu_memory_,
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_,
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory());
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub1.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub2.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub3.BytesWhenNotVisible());
// Now background stub 3, and it should cause stub 2 to be
// evicted because it was set non-visible first
stub3.SetVisible(false);
Manage();
- EXPECT_EQ(memmgr_.bytes_backgrounded_available_gpu_memory_,
- memmgr_.GetCurrentBackgroundedAvailableGpuMemory());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_,
+ memmgr_.GetCurrentNonvisibleAvailableGpuMemory());
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub1.BytesWhenNotVisible());
EXPECT_EQ(0ul,
stub2.BytesWhenNotVisible());
- EXPECT_EQ(memmgr_.GetCurrentBackgroundedAvailableGpuMemory(),
+ EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(),
stub3.BytesWhenNotVisible());
}
// Test GpuMemoryManager's tracking of unmanaged (e.g, WebGL) memory.
TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
memmgr_.TestingSetAvailableGpuMemory(64);
- memmgr_.TestingSetBackgroundedAvailableGpuMemory(16);
+ memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
memmgr_.TestingSetUnmanagedLimitStep(16);
memmgr_.TestingSetMinimumClientAllocation(8);
@@ -826,7 +826,7 @@ TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
// Expect that the one stub get the maximum tab allocation.
Manage();
- EXPECT_EQ(memmgr_.GetMaximumTabAllocation(),
+ EXPECT_EQ(memmgr_.GetMaximumClientAllocation(),
stub1.BytesWhenVisible());
// Now allocate some unmanaged memory and make sure the amount
@@ -837,7 +837,7 @@ TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
48,
gpu::gles2::MemoryTracker::kUnmanaged);
Manage();
- EXPECT_GT(memmgr_.GetMaximumTabAllocation(),
+ EXPECT_GT(memmgr_.GetMaximumClientAllocation(),
stub1.BytesWhenVisible());
// Now allocate the entire FB worth of unmanaged memory, and
@@ -848,7 +848,7 @@ TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
64,
gpu::gles2::MemoryTracker::kUnmanaged);
Manage();
- EXPECT_EQ(memmgr_.GetMinimumTabAllocation(),
+ EXPECT_EQ(memmgr_.GetMinimumClientAllocation(),
stub1.BytesWhenVisible());
// Far-oversubscribe the entire FB, and make sure we stay at
@@ -859,7 +859,7 @@ TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
999,
gpu::gles2::MemoryTracker::kUnmanaged);
Manage();
- EXPECT_EQ(memmgr_.GetMinimumTabAllocation(),
+ EXPECT_EQ(memmgr_.GetMinimumClientAllocation(),
stub1.BytesWhenVisible());
// Delete all tracked memory so we don't hit leak checks.