-rw-r--r--  content/common/gpu/gpu_memory_manager.cc            45
-rw-r--r--  content/common/gpu/gpu_memory_manager.h             32
-rw-r--r--  content/common/gpu/gpu_memory_manager_client.cc      1
-rw-r--r--  content/common/gpu/gpu_memory_manager_client.h       1
-rw-r--r--  content/common/gpu/gpu_memory_manager_unittest.cc   176
5 files changed, 236 insertions, 19 deletions
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index ce96f4d..1d63e5f 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -56,7 +56,7 @@ GpuMemoryManager::GpuMemoryManager(
       bytes_available_gpu_memory_(0),
       bytes_available_gpu_memory_overridden_(false),
       bytes_minimum_per_client_(0),
-      bytes_minimum_per_client_overridden_(false),
+      bytes_default_per_client_(0),
       bytes_nonvisible_available_gpu_memory_(0),
       bytes_allocated_managed_current_(0),
       bytes_allocated_managed_visible_(0),
@@ -71,6 +71,15 @@ GpuMemoryManager::GpuMemoryManager(
       disable_schedule_manage_(false) {
   CommandLine* command_line = CommandLine::ForCurrentProcess();
 
+
+#if defined(OS_ANDROID)
+  bytes_default_per_client_ = 32 * 1024 * 1024;
+  bytes_minimum_per_client_ = 32 * 1024 * 1024;
+#else
+  bytes_default_per_client_ = 64 * 1024 * 1024;
+  bytes_minimum_per_client_ = 64 * 1024 * 1024;
+#endif
+
   if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
     base::StringToUint64(
         command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
@@ -138,18 +147,6 @@ uint64 GpuMemoryManager::GetMaximumClientAllocation() const {
 #endif
 }
 
-uint64 GpuMemoryManager::GetMinimumClientAllocation() const {
-  if (bytes_minimum_per_client_overridden_)
-    return bytes_minimum_per_client_;
-#if defined(OS_ANDROID)
-  return 32 * 1024 * 1024;
-#elif defined(OS_CHROMEOS)
-  return 64 * 1024 * 1024;
-#else
-  return 64 * 1024 * 1024;
-#endif
-}
-
 uint64 GpuMemoryManager::CalcAvailableFromViewportArea(int viewport_area) {
   // We can't query available GPU memory from the system on Android, but
   // 18X the viewport and 50% of the dalvik heap size give us a good
@@ -389,6 +386,14 @@ void GpuMemoryManager::SetClientStateManagedMemoryStats(
       &bytes_allocated_managed_nonvisible_);
   client_state->managed_memory_stats_ = stats;
 
+  // If this is the first time that stats have been received for this
+  // client, use them immediately.
+  if (!client_state->managed_memory_stats_received_) {
+    client_state->managed_memory_stats_received_ = true;
+    ScheduleManage(kScheduleManageNow);
+    return;
+  }
+
   if (use_nonuniform_memory_policy_) {
     // If these statistics sit outside of the range that we used in our
     // computation of memory allocations then recompute the allocations.
@@ -561,6 +566,9 @@ uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible(
     uint64 bytes_overall_cap) {
   GpuManagedMemoryStats* stats = &client_state->managed_memory_stats_;
 
+  if (!client_state->managed_memory_stats_received_)
+    return GetDefaultClientAllocation();
+
   uint64 bytes_required = 9 * stats->bytes_required / 8;
   bytes_required = std::min(bytes_required, GetMaximumClientAllocation());
   bytes_required = std::max(bytes_required, GetMinimumClientAllocation());
@@ -582,6 +590,10 @@
 
 uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible(
     GpuMemoryManagerClientState* client_state) {
+
+  if (!client_state->managed_memory_stats_received_)
+    return 0;
+
   return 9 * client_state->managed_memory_stats_.bytes_required / 8;
 }
 
@@ -755,6 +767,11 @@ void GpuMemoryManager::ComputeNonvisibleSurfacesAllocationsNonuniform() {
        ++it) {
     GpuMemoryManagerClientState* client_state = *it;
 
+    // If this client is nonvisible and has already had its contents discarded,
+    // don't re-generate the contents until the client becomes visible again.
+    if (!client_state->bytes_allocation_when_nonvisible_)
+      continue;
+
     client_state->bytes_allocation_when_nonvisible_ =
         ComputeClientAllocationWhenNonvisible(client_state);
 
@@ -762,7 +779,7 @@
     // this client still fits, all it to keep its contents.
     if (bytes_allocated_nonvisible +
        client_state->bytes_allocation_when_nonvisible_ >
-       bytes_allocated_nonvisible) {
+       bytes_available_nonvisible) {
      client_state->bytes_allocation_when_nonvisible_ = 0;
    }
    bytes_allocated_nonvisible +=
diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h
index a8eaf99..895f7b9 100644
--- a/content/common/gpu/gpu_memory_manager.h
+++ b/content/common/gpu/gpu_memory_manager.h
@@ -95,6 +95,15 @@ class CONTENT_EXPORT GpuMemoryManager :
   FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                            TestUnmanagedTracking);
 
+  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform,
+                           BackgroundMru);
+  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform,
+                           BackgroundDiscardPersistent);
+  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform,
+                           UnmanagedTracking);
+  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform,
+                           DefaultAllocation);
+
   typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
       TrackingGroupMap;
 
@@ -144,9 +153,16 @@ class CONTENT_EXPORT GpuMemoryManager :
   // Maximum cap on total GPU memory, no matter how much the GPU reports.
   uint64 GetMaximumTotalGpuMemory() const;
 
-  // The maximum and minimum amount of memory that a tab may be assigned.
+  // The maximum and minimum amount of memory that a client may be assigned.
   uint64 GetMaximumClientAllocation() const;
-  uint64 GetMinimumClientAllocation() const;
+  uint64 GetMinimumClientAllocation() const {
+    return bytes_minimum_per_client_;
+  }
+  // The default amount of memory that a client is assigned, if it has not
+  // reported any memory usage stats yet.
+  uint64 GetDefaultClientAllocation() const {
+    return bytes_default_per_client_;
+  }
 
   // Get a reasonable memory limit from a viewport's surface area.
   static uint64 CalcAvailableFromViewportArea(int viewport_area);
@@ -185,6 +201,9 @@ class CONTENT_EXPORT GpuMemoryManager :
   ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);
 
   // Interfaces for testing
+  void TestingSetUseNonuniformMemoryPolicy(bool use_nonuniform_memory_policy) {
+    use_nonuniform_memory_policy_ = use_nonuniform_memory_policy;
+  }
   void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
   void TestingSetAvailableGpuMemory(uint64 bytes) {
     bytes_available_gpu_memory_ = bytes;
@@ -193,7 +212,10 @@
 
   void TestingSetMinimumClientAllocation(uint64 bytes) {
     bytes_minimum_per_client_ = bytes;
-    bytes_minimum_per_client_overridden_ = true;
+  }
+
+  void TestingSetDefaultClientAllocation(uint64 bytes) {
+    bytes_default_per_client_ = bytes;
   }
 
   void TestingSetUnmanagedLimitStep(uint64 bytes) {
@@ -230,9 +252,9 @@ class CONTENT_EXPORT GpuMemoryManager :
   uint64 bytes_available_gpu_memory_;
   bool bytes_available_gpu_memory_overridden_;
 
-  // The minimum allocation that may be given to a single renderer.
+  // The minimum and default allocations for a single client.
   uint64 bytes_minimum_per_client_;
-  bool bytes_minimum_per_client_overridden_;
+  uint64 bytes_default_per_client_;
 
   // The maximum amount of memory that can be allocated for GPU resources
   // in nonvisible renderers.
diff --git a/content/common/gpu/gpu_memory_manager_client.cc b/content/common/gpu/gpu_memory_manager_client.cc
index 0c2b11f..fb3182c 100644
--- a/content/common/gpu/gpu_memory_manager_client.cc
+++ b/content/common/gpu/gpu_memory_manager_client.cc
@@ -22,6 +22,7 @@ GpuMemoryManagerClientState::GpuMemoryManagerClientState(
       has_surface_(has_surface),
       visible_(visible),
       list_iterator_valid_(false),
+      managed_memory_stats_received_(false),
       bytes_nicetohave_limit_low_(0),
       bytes_nicetohave_limit_high_(0),
       bytes_allocation_when_visible_(0),
diff --git a/content/common/gpu/gpu_memory_manager_client.h b/content/common/gpu/gpu_memory_manager_client.h
index 346451e..ca7c002 100644
--- a/content/common/gpu/gpu_memory_manager_client.h
+++ b/content/common/gpu/gpu_memory_manager_client.h
@@ -83,6 +83,7 @@ class CONTENT_EXPORT GpuMemoryManagerClientState {
 
   // Statistics about memory usage.
   GpuManagedMemoryStats managed_memory_stats_;
+  bool managed_memory_stats_received_;
 
   // When managed_memory_stats_.bytes_nicetohave leaves the range
   // [low_, high_], then re-adjust memory limits.
diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc
index 06c785c..b9c3727 100644
--- a/content/common/gpu/gpu_memory_manager_unittest.cc
+++ b/content/common/gpu/gpu_memory_manager_unittest.cc
@@ -164,6 +164,7 @@ class GpuMemoryManagerTest : public testing::Test {
   GpuMemoryManagerTest()
       : memmgr_(0, kFrontbufferLimitForTest) {
     memmgr_.TestingDisableScheduleManage();
+    memmgr_.TestingSetUseNonuniformMemoryPolicy(false);
   }
 
   virtual void SetUp() {
@@ -250,6 +251,17 @@ class GpuMemoryManagerTest : public testing::Test {
   GpuMemoryManager memmgr_;
 };
 
+class GpuMemoryManagerTestNonuniform : public GpuMemoryManagerTest {
+ protected:
+  void SetClientStats(
+      FakeClient* client,
+      uint64 required,
+      uint64 nicetohave) {
+    client->SetManagedMemoryStats(
+        GpuManagedMemoryStats(required, nicetohave, 0, false));
+  }
+};
+
 // Test GpuMemoryManager::Manage basic functionality.
 // Expect memory allocation to set suggest_have_frontbuffer/backbuffer
 // according to visibility and last used time for stubs with surface.
@@ -873,4 +885,168 @@ TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) {
       gpu::gles2::MemoryTracker::kUnmanaged);
 }
 
+// Test nonvisible MRU behavior (the most recently used nonvisible clients
+// keep their contents).
+TEST_F(GpuMemoryManagerTestNonuniform, BackgroundMru) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetUseNonuniformMemoryPolicy(true);
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+  FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true);
+  FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // When all are visible, they should all be allowed to have memory
+  // should they become nonvisible.
+  SetClientStats(&stub1, 6, 23);
+  SetClientStats(&stub2, 6, 23);
+  SetClientStats(&stub3, 6, 23);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenVisible(), 20u);
+  EXPECT_GE(stub2.BytesWhenVisible(), 20u);
+  EXPECT_GE(stub3.BytesWhenVisible(), 20u);
+  EXPECT_LT(stub1.BytesWhenVisible(), 22u);
+  EXPECT_LT(stub2.BytesWhenVisible(), 22u);
+  EXPECT_LT(stub3.BytesWhenVisible(), 22u);
+  EXPECT_GE(stub1.BytesWhenNotVisible(), 6u);
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 6u);
+  EXPECT_GE(stub3.BytesWhenNotVisible(), 6u);
+
+  // Background stubs 1 and 2, and they should fit. All stubs should
+  // have their full nicetohave budget should they become visible.
+  stub2.SetVisible(false);
+  stub1.SetVisible(false);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenVisible(), 23u);
+  EXPECT_GE(stub2.BytesWhenVisible(), 23u);
+  EXPECT_GE(stub3.BytesWhenVisible(), 23u);
+  EXPECT_LT(stub1.BytesWhenVisible(), 32u);
+  EXPECT_LT(stub2.BytesWhenVisible(), 32u);
+  EXPECT_LT(stub3.BytesWhenVisible(), 32u);
+  EXPECT_GE(stub1.BytesWhenNotVisible(), 6u);
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 6u);
+  EXPECT_GE(stub3.BytesWhenNotVisible(), 6u);
+
+  // Now background stub 3, and it should cause stub 2 to be
+  // evicted because it was set non-visible first
+  stub3.SetVisible(false);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenNotVisible(), 6u);
+  EXPECT_EQ(stub2.BytesWhenNotVisible(), 0u);
+  EXPECT_GE(stub3.BytesWhenNotVisible(), 6u);
+}
+
+// Test that once a backgrounded client has dropped its resources, it
+// doesn't get them back until it becomes visible again.
+TEST_F(GpuMemoryManagerTestNonuniform, BackgroundDiscardPersistent) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetUseNonuniformMemoryPolicy(true);
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+  FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // Both clients should be able to keep their contents should one of
+  // them become nonvisible.
+  SetClientStats(&stub1, 10, 20);
+  SetClientStats(&stub2, 10, 20);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenNotVisible(), 10u);
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 10u);
+
+  // If they both go nonvisible, then only the most recently used client
+  // should keep its contents.
+  stub1.SetVisible(false);
+  stub2.SetVisible(false);
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenNotVisible(), 0u);
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 10u);
+
+  // When becoming visible, stub 2 should get its contents back, and
+  // retain them next time it is made nonvisible.
+  stub2.SetVisible(true);
+  Manage();
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 10u);
+  stub2.SetVisible(false);
+  Manage();
+  EXPECT_GE(stub2.BytesWhenNotVisible(), 10u);
+}
+
+// Test tracking of unmanaged (e.g, WebGL) memory.
+TEST_F(GpuMemoryManagerTestNonuniform, UnmanagedTracking) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetUseNonuniformMemoryPolicy(true);
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+  memmgr_.TestingSetUnmanagedLimitStep(16);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // Expect that the one stub get its nicetohave level.
+  SetClientStats(&stub1, 16, 32);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenVisible(), 32u);
+
+  // Now allocate some unmanaged memory and make sure the amount
+  // goes down.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      0,
+      48,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_LT(stub1.BytesWhenVisible(), 24u);
+
+  // Now allocate the entire FB worth of unmanaged memory, and
+  // make sure that we stay stuck at the minimum tab allocation.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      48,
+      64,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
+
+  // Far-oversubscribe the entire FB, and make sure we stay at
+  // the minimum allocation, and don't blow up.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      64,
+      999,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
+
+  // Delete all tracked memory so we don't hit leak checks.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      999,
+      0,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+}
+
+// Test the default allocation levels are used.
+TEST_F(GpuMemoryManagerTestNonuniform, DefaultAllocation) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetUseNonuniformMemoryPolicy(true);
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetNonvisibleAvailableGpuMemory(16);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+  memmgr_.TestingSetDefaultClientAllocation(16);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // Expect that a client which has not sent stats receive the
+  // default allocation.
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenVisible(),
+            memmgr_.GetDefaultClientAllocation());
+  EXPECT_EQ(stub1.BytesWhenNotVisible(), 0u);
+}
+
 }  // namespace content
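
For reference, the core of the allocation fallback introduced above can be summarized in a small standalone sketch: a client that has never reported managed-memory stats receives the default per-client budget, and once stats arrive its requirement is clamped to the per-client minimum and maximum. This is a minimal sketch of only the paths visible in the hunks above; the ClientState and Constants structs and the ComputeVisibleAllocation helper are illustrative stand-ins rather than Chromium API, and the real ComputeClientAllocationWhenVisible also weighs the nicetohave value, which is omitted here.

    // Illustrative sketch only; simplified stand-ins for GpuMemoryManager state.
    #include <algorithm>
    #include <cstdint>

    struct ClientState {
      bool managed_memory_stats_received;  // set when the first stats arrive
      uint64_t bytes_required;             // from GpuManagedMemoryStats
    };

    struct Constants {
      uint64_t bytes_default_per_client;   // e.g. 32 MB on Android, 64 MB elsewhere
      uint64_t bytes_minimum_per_client;
      uint64_t bytes_maximum_per_client;
    };

    // Mirrors the early-return and clamping shown in the
    // ComputeClientAllocationWhenVisible hunk (nicetohave handling omitted).
    uint64_t ComputeVisibleAllocation(const ClientState& client,
                                      const Constants& k,
                                      uint64_t bytes_overall_cap) {
      if (!client.managed_memory_stats_received)
        return k.bytes_default_per_client;  // default until stats are received
      uint64_t bytes = 9 * client.bytes_required / 8;
      bytes = std::min(bytes, k.bytes_maximum_per_client);
      bytes = std::max(bytes, k.bytes_minimum_per_client);
      return std::min(bytes, bytes_overall_cap);
    }

Under the DefaultAllocation test's setup above (default 16, minimum 8), a client with no stats would get 16 from this sketch, matching the test's expectation that BytesWhenVisible() equals GetDefaultClientAllocation().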