author     ccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-03-12 22:57:28 +0000
committer  ccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-03-12 22:57:28 +0000
commit     469b82d84ef7e3d9eb37eeb62546af240225f73e (patch)
tree       ac8d5631b6c540ac1f66c62416c8275c1035188a
parent     7ffee21c761211191b3064a480d009286bb7b2e8 (diff)
Delete memory manager dead code.
For the past few months the memory manager has had two paths:
the old path, which distributes memory uniformly across all
visible renderers, and the new path, which takes each renderer's
needs into account. The transition from the old policy to the
new one was made quite a while ago, so the old policy can now be
removed safely.
BUG=150883
Review URL: https://chromiumcodereview.appspot.com/12475002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@187678 0039d316-1c4b-4281-b951-d872f2087c98
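
To make the policy difference concrete, below is a minimal, self-contained C++ sketch of the two approaches described above. It is illustrative only and not taken from this patch: the struct and function names (ClientNeed, ComputeUniformBudget, ComputeNeedBasedBudgets) are hypothetical, and the proportional-bonus rule is a simplification of the need-based policy the patch keeps.

// Illustrative only -- not Chromium code. Types and helpers are hypothetical.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ClientNeed {
  uint64_t bytes_required;      // Minimum needed to render correctly.
  uint64_t bytes_nice_to_have;  // Amount that avoids redundant re-uploads.
};

// Old (uniform) policy: every visible client gets the same slice of the
// total budget, regardless of what it reports needing.
uint64_t ComputeUniformBudget(uint64_t bytes_available,
                              size_t visible_client_count) {
  if (visible_client_count == 0)
    return 0;
  return bytes_available / visible_client_count;
}

// New (need-based) policy, simplified: grant every client its required bytes,
// then share any surplus in proportion to the extra memory each client says
// it could use ("nice to have" beyond "required").
std::vector<uint64_t> ComputeNeedBasedBudgets(
    uint64_t bytes_available, const std::vector<ClientNeed>& clients) {
  uint64_t required_total = 0;
  uint64_t extra_total = 0;
  for (const ClientNeed& c : clients) {
    required_total += c.bytes_required;
    extra_total += c.bytes_nice_to_have > c.bytes_required
                       ? c.bytes_nice_to_have - c.bytes_required
                       : 0;
  }
  uint64_t surplus =
      bytes_available > required_total ? bytes_available - required_total : 0;

  std::vector<uint64_t> budgets;
  budgets.reserve(clients.size());
  for (const ClientNeed& c : clients) {
    uint64_t extra = c.bytes_nice_to_have > c.bytes_required
                         ? c.bytes_nice_to_have - c.bytes_required
                         : 0;
    uint64_t bonus = extra_total ? surplus * extra / extra_total : 0;
    budgets.push_back(c.bytes_required + bonus);
  }
  return budgets;
}

In this model the uniform split roughly mirrors the removed AssignSurfacesAllocationsUniform()/GetVisibleClientAllocation() path in the diff below (which additionally honored the browser window count), while the need-based variant is only a loose stand-in for the AssignSurfacesAllocations() path that the patch keeps.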
-rw-r--r--   chrome/browser/chrome_browser_main.cc                      7
-rw-r--r--   chrome/browser/chromeos/login/chrome_restart_request.cc    1
-rw-r--r--   chrome/browser/gpu/chrome_gpu_util.cc                      61
-rw-r--r--   chrome/browser/gpu/chrome_gpu_util.h                       8
-rw-r--r--   content/browser/gpu/gpu_data_manager_impl.cc               16
-rw-r--r--   content/browser/gpu/gpu_data_manager_impl.h                2
-rw-r--r--   content/browser/gpu/gpu_process_host.cc                    6
-rw-r--r--   content/common/gpu/gpu_memory_manager.cc                   229
-rw-r--r--   content/common/gpu/gpu_memory_manager.h                    47
-rw-r--r--   content/common/gpu/gpu_memory_manager_unittest.cc          200
-rw-r--r--   content/common/gpu/gpu_memory_uma_stats.h                  3
-rw-r--r--   content/common/gpu/gpu_messages.h                          6
-rw-r--r--   content/gpu/gpu_child_thread.cc                            7
-rw-r--r--   content/public/browser/gpu_data_manager.h                  5
-rw-r--r--   gpu/command_buffer/service/gpu_switches.cc                 6
-rw-r--r--   gpu/command_buffer/service/gpu_switches.h                  1
16 files changed, 35 insertions, 570 deletions
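
One behavioral detail worth keeping in mind while reading the unit-test changes in the diff: under the remaining policy, backgrounded (nonvisible) clients share a budget of roughly one quarter of the total managed GPU memory (hence the "64/4 = 16" comments added to the tests), and the most recently used nonvisible clients keep their contents first. The following is a rough model of that MRU behavior only, with hypothetical names (NonvisibleClient, AssignNonvisibleBudgets) that do not appear in the patch:

// Illustrative only -- not Chromium code. Names here are hypothetical.
#include <cstdint>
#include <list>

struct NonvisibleClient {
  uint64_t bytes_required;
  uint64_t bytes_granted;
};

// 'mru' is ordered most-recently-used first, mirroring the memory manager's
// clients_nonvisible_mru_ list. Clients keep their required bytes while the
// shared nonvisible budget (one quarter of the total here) lasts; the rest
// drop everything until they become visible again.
void AssignNonvisibleBudgets(uint64_t bytes_available_total,
                             std::list<NonvisibleClient*>* mru) {
  const uint64_t budget = bytes_available_total / 4;  // e.g. 64 -> 16 in the tests.
  uint64_t used = 0;
  for (NonvisibleClient* client : *mru) {
    if (used + client->bytes_required <= budget) {
      client->bytes_granted = client->bytes_required;
      used += client->bytes_required;
    } else {
      client->bytes_granted = 0;
    }
  }
}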
diff --git a/chrome/browser/chrome_browser_main.cc b/chrome/browser/chrome_browser_main.cc index 46daced..3f8ccfa 100644 --- a/chrome/browser/chrome_browser_main.cc +++ b/chrome/browser/chrome_browser_main.cc @@ -1052,9 +1052,6 @@ void ChromeBrowserMainParts::PostProfileInit() { void ChromeBrowserMainParts::PreBrowserStart() { for (size_t i = 0; i < chrome_extra_parts_.size(); ++i) chrome_extra_parts_[i]->PreBrowserStart(); -#if !defined(OS_ANDROID) - gpu_util::InstallBrowserMonitor(); -#endif three_d_observer_.reset(new ThreeDAPIObserver()); } @@ -1697,10 +1694,6 @@ void ChromeBrowserMainParts::PostMainMessageLoopRun() { for (size_t i = 0; i < chrome_extra_parts_.size(); ++i) chrome_extra_parts_[i]->PostMainMessageLoopRun(); -#if !defined(OS_ANDROID) - gpu_util::UninstallBrowserMonitor(); -#endif - #if defined(OS_WIN) // Log the search engine chosen on first run. Do this at shutdown, after any // changes are made from the first run bubble link, etc. diff --git a/chrome/browser/chromeos/login/chrome_restart_request.cc b/chrome/browser/chromeos/login/chrome_restart_request.cc index 76a742c..0ee45c7 100644 --- a/chrome/browser/chromeos/login/chrome_restart_request.cc +++ b/chrome/browser/chromeos/login/chrome_restart_request.cc @@ -78,7 +78,6 @@ std::string DeriveCommandLine(const GURL& start_url, ::switches::kDisableForceCompositingMode, ::switches::kDisableGpuWatchdog, ::switches::kDisableLoginAnimations, - ::switches::kDisableNonuniformGpuMemPolicy, ::switches::kDisableOobeAnimation, ::switches::kDisablePanelFitting, ::switches::kDisableThreadedCompositing, diff --git a/chrome/browser/gpu/chrome_gpu_util.cc b/chrome/browser/gpu/chrome_gpu_util.cc index fccdc45..d6a0e90 100644 --- a/chrome/browser/gpu/chrome_gpu_util.cc +++ b/chrome/browser/gpu/chrome_gpu_util.cc @@ -24,67 +24,6 @@ using content::GpuDataManager; namespace gpu_util { -// The BrowserMonitor class is used to track the number of currently open -// browser windows, so that the gpu can be notified when they are created or -// destroyed. We only count tabbed windows for this purpose. - -// There's no BrowserList on Android/ -#if !defined(OS_ANDROID) -class BrowserMonitor : public chrome::BrowserListObserver { - public: - static BrowserMonitor* GetInstance() { - static BrowserMonitor* instance = NULL; - if (!instance) - instance = new BrowserMonitor; - return instance; - } - - void Install() { - if (!installed_) { - BrowserList::AddObserver(this); - installed_ = true; - } - } - - void Uninstall() { - if (installed_) { - BrowserList::RemoveObserver(this); - installed_ = false; - } - } - - private: - BrowserMonitor() : num_browsers_(0), installed_(false) { - } - - virtual ~BrowserMonitor() { - } - - // BrowserListObserver implementation. 
- virtual void OnBrowserAdded(Browser* browser) OVERRIDE { - if (browser->type() == Browser::TYPE_TABBED) - content::GpuDataManager::GetInstance()->SetWindowCount(++num_browsers_); - } - - virtual void OnBrowserRemoved(Browser* browser) OVERRIDE { - if (browser->type() == Browser::TYPE_TABBED) - content::GpuDataManager::GetInstance()->SetWindowCount(--num_browsers_); - } - - uint32 num_browsers_; - bool installed_; -}; - -void InstallBrowserMonitor() { - BrowserMonitor::GetInstance()->Install(); -} - -void UninstallBrowserMonitor() { - BrowserMonitor::GetInstance()->Uninstall(); -} - -#endif // !defined(OS_ANDROID) - void DisableCompositingFieldTrial() { base::FieldTrial* trial = base::FieldTrialList::Find(content::kGpuCompositingFieldTrialName); diff --git a/chrome/browser/gpu/chrome_gpu_util.h b/chrome/browser/gpu/chrome_gpu_util.h index 4d14444..1edff61 100644 --- a/chrome/browser/gpu/chrome_gpu_util.h +++ b/chrome/browser/gpu/chrome_gpu_util.h @@ -7,14 +7,6 @@ namespace gpu_util { -// Sets up a monitor for browser windows, to be used to determine gpu -// managed memory allocation. -// Not supported on Android. -#if !defined(OS_ANDROID) -void InstallBrowserMonitor(); -void UninstallBrowserMonitor(); -#endif // !defined(OS_ANDROID) - // Sets up force-compositing-mode and threaded compositing field trials. void InitializeCompositingFieldTrial(); diff --git a/content/browser/gpu/gpu_data_manager_impl.cc b/content/browser/gpu/gpu_data_manager_impl.cc index 4790b09..5c31e37 100644 --- a/content/browser/gpu/gpu_data_manager_impl.cc +++ b/content/browser/gpu/gpu_data_manager_impl.cc @@ -206,22 +206,6 @@ void GpuDataManagerImpl::RemoveObserver(GpuDataManagerObserver* observer) { observer_list_->RemoveObserver(observer); } -void GpuDataManagerImpl::SetWindowCount(uint32 count) { - { - base::AutoLock auto_lock(gpu_info_lock_); - window_count_ = count; - } - GpuProcessHost::SendOnIO( - GpuProcessHost::GPU_PROCESS_KIND_SANDBOXED, - CAUSE_FOR_GPU_LAUNCH_NO_LAUNCH, - new GpuMsg_SetVideoMemoryWindowCount(count)); -} - -uint32 GpuDataManagerImpl::GetWindowCount() const { - base::AutoLock auto_lock(gpu_info_lock_); - return window_count_; -} - void GpuDataManagerImpl::UnblockDomainFrom3DAPIs(const GURL& url) { // This method must do two things: // diff --git a/content/browser/gpu/gpu_data_manager_impl.h b/content/browser/gpu/gpu_data_manager_impl.h index 8830fed..9de762f 100644 --- a/content/browser/gpu/gpu_data_manager_impl.h +++ b/content/browser/gpu/gpu_data_manager_impl.h @@ -69,8 +69,6 @@ class CONTENT_EXPORT GpuDataManagerImpl virtual void RegisterSwiftShaderPath(const base::FilePath& path) OVERRIDE; virtual void AddObserver(GpuDataManagerObserver* observer) OVERRIDE; virtual void RemoveObserver(GpuDataManagerObserver* observer) OVERRIDE; - virtual void SetWindowCount(uint32 count) OVERRIDE; - virtual uint32 GetWindowCount() const OVERRIDE; virtual void UnblockDomainFrom3DAPIs(const GURL& url) OVERRIDE; virtual void DisableGpuWatchdog() OVERRIDE; virtual void SetGLStrings(const std::string& gl_vendor, diff --git a/content/browser/gpu/gpu_process_host.cc b/content/browser/gpu/gpu_process_host.cc index 4a5734d..ea817c1 100644 --- a/content/browser/gpu/gpu_process_host.cc +++ b/content/browser/gpu/gpu_process_host.cc @@ -432,13 +432,10 @@ GpuProcessHost::~GpuProcessHost() { UMA_HISTOGRAM_COUNTS_100("GPU.AtExitSurfaceCount", GpuSurfaceTracker::Get()->GetSurfaceCount()); - UMA_HISTOGRAM_BOOLEAN("GPU.AtExitReceivedMemoryStats", uma_memory_stats_received_); if (uma_memory_stats_received_) { - 
UMA_HISTOGRAM_COUNTS_100("GPU.AtExitWindowCount", - uma_memory_stats_.window_count); UMA_HISTOGRAM_COUNTS_100("GPU.AtExitManagedMemoryClientCount", uma_memory_stats_.client_count); UMA_HISTOGRAM_COUNTS_100("GPU.AtExitContextGroupCount", @@ -528,8 +525,7 @@ bool GpuProcessHost::Init() { if (!Send(new GpuMsg_Initialize())) return false; - return Send(new GpuMsg_SetVideoMemoryWindowCount( - GpuDataManagerImpl::GetInstance()->GetWindowCount())); + return true; } void GpuProcessHost::RouteOnUIThread(const IPC::Message& message) { diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc index a3a60e6..9a1402c 100644 --- a/content/common/gpu/gpu_memory_manager.cc +++ b/content/common/gpu/gpu_memory_manager.cc @@ -48,7 +48,6 @@ GpuMemoryManager::GpuMemoryManager( GpuChannelManager* channel_manager, uint64 max_surfaces_with_frontbuffer_soft_limit) : channel_manager_(channel_manager), - use_nonuniform_memory_policy_(true), manage_immediate_scheduled_(false), max_surfaces_with_frontbuffer_soft_limit_( max_surfaces_with_frontbuffer_soft_limit), @@ -56,7 +55,6 @@ GpuMemoryManager::GpuMemoryManager( bytes_available_gpu_memory_overridden_(false), bytes_minimum_per_client_(0), bytes_default_per_client_(0), - bytes_nonvisible_available_gpu_memory_(0), bytes_allocated_managed_current_(0), bytes_allocated_managed_visible_(0), bytes_allocated_managed_nonvisible_(0), @@ -65,8 +63,6 @@ GpuMemoryManager::GpuMemoryManager( bytes_allocated_unmanaged_high_(0), bytes_allocated_unmanaged_low_(0), bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), - window_count_has_been_received_(false), - window_count_(0), disable_schedule_manage_(false) { CommandLine* command_line = CommandLine::ForCurrentProcess(); @@ -79,9 +75,6 @@ GpuMemoryManager::GpuMemoryManager( bytes_minimum_per_client_ = 64 * 1024 * 1024; #endif - if (command_line->HasSwitch(switches::kDisableNonuniformGpuMemPolicy)) - use_nonuniform_memory_policy_ = false; - if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { base::StringToUint64( command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), @@ -90,8 +83,6 @@ GpuMemoryManager::GpuMemoryManager( bytes_available_gpu_memory_overridden_ = true; } else bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); - - UpdateNonvisibleAvailableGpuMemory(); } GpuMemoryManager::~GpuMemoryManager() { @@ -113,14 +104,6 @@ uint64 GpuMemoryManager::GetAvailableGpuMemory() const { return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; } -uint64 GpuMemoryManager::GetCurrentNonvisibleAvailableGpuMemory() const { - if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) { - return std::min(bytes_nonvisible_available_gpu_memory_, - GetAvailableGpuMemory() - bytes_allocated_managed_visible_); - } - return 0; -} - uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const { #if defined(OS_ANDROID) return 32 * 1024 * 1024; @@ -217,16 +200,6 @@ void GpuMemoryManager::UpdateUnmanagedMemoryLimits() { bytes_unmanaged_limit_step_); } -void GpuMemoryManager::UpdateNonvisibleAvailableGpuMemory() { - // Be conservative and disable saving nonvisible clients' textures on Android - // for the moment -#if defined(OS_ANDROID) - bytes_nonvisible_available_gpu_memory_ = 0; -#else - bytes_nonvisible_available_gpu_memory_ = GetAvailableGpuMemory() / 4; -#endif -} - void GpuMemoryManager::ScheduleManage( ScheduleManageTime schedule_manage_time) { if (disable_schedule_manage_) @@ -367,22 +340,14 @@ void 
GpuMemoryManager::SetClientStateManagedMemoryStats( return; } - if (use_nonuniform_memory_policy_) { - // If these statistics sit outside of the range that we used in our - // computation of memory allocations then recompute the allocations. - if (client_state->managed_memory_stats_.bytes_nice_to_have > - client_state->bytes_nicetohave_limit_high_) { - ScheduleManage(kScheduleManageNow); - } else if (client_state->managed_memory_stats_.bytes_nice_to_have < - client_state->bytes_nicetohave_limit_low_) { - ScheduleManage(kScheduleManageLater); - } - } else { - // If this allocation pushed our usage of nonvisible clients' memory over - // the limit, then schedule a drop of nonvisible memory. - if (bytes_allocated_managed_nonvisible_ > - GetCurrentNonvisibleAvailableGpuMemory()) - ScheduleManage(kScheduleManageLater); + // If these statistics sit outside of the range that we used in our + // computation of memory allocations then recompute the allocations. + if (client_state->managed_memory_stats_.bytes_nice_to_have > + client_state->bytes_nicetohave_limit_high_) { + ScheduleManage(kScheduleManageNow); + } else if (client_state->managed_memory_stats_.bytes_nice_to_have < + client_state->bytes_nicetohave_limit_low_) { + ScheduleManage(kScheduleManageLater); } } @@ -424,44 +389,6 @@ void GpuMemoryManager::GetVideoMemoryUsageStats( bytes_allocated_historical_max_; } -void GpuMemoryManager::SetWindowCount(uint32 window_count) { - bool should_schedule_manage = !window_count_has_been_received_ || - (window_count != window_count_); - window_count_has_been_received_ = true; - window_count_ = window_count; - if (should_schedule_manage) - ScheduleManage(kScheduleManageNow); -} - -// The current Manage algorithm simply classifies contexts (clients) into -// "foreground", "background", or "hibernated" categories. -// For each of these three categories, there are predefined memory allocation -// limits and front/backbuffer states. -// -// Users may or may not have a surfaces, and the rules are different for each. -// -// The rules for categorizing contexts with a surface are: -// 1. Foreground: All visible surfaces. -// * Must have both front and back buffer. -// -// 2. Background: Non visible surfaces, which have not surpassed the -// max_surfaces_with_frontbuffer_soft_limit_ limit. -// * Will have only a frontbuffer. -// -// 3. Hibernated: Non visible surfaces, which have surpassed the -// max_surfaces_with_frontbuffer_soft_limit_ limit. -// * Will not have either buffer. -// -// The considerations for categorizing contexts without a surface are: -// 1. These contexts do not track {visibility,last_used_time}, so cannot -// sort them directly. -// 2. These contexts may be used by, and thus affect, other contexts, and so -// cannot be less visible than any affected context. -// 3. Contexts belong to share groups within which resources can be shared. -// -// As such, the rule for categorizing contexts without a surface is: -// 1. Find the most visible context-with-a-surface within each -// context-without-a-surface's share group, and inherit its visibilty. void GpuMemoryManager::Manage() { manage_immediate_scheduled_ = false; delayed_manage_callback_.Cancel(); @@ -472,20 +399,13 @@ void GpuMemoryManager::Manage() { // Update the limit on unmanaged memory. UpdateUnmanagedMemoryLimits(); - // Update the nonvisible available gpu memory because it depends on - // the available GPU memory. 
- UpdateNonvisibleAvailableGpuMemory(); - // Determine which clients are "hibernated" (which determines the // distribution of frontbuffers and memory among clients that don't have // surfaces). SetClientsHibernatedState(); // Assign memory allocations to clients that have surfaces. - if (use_nonuniform_memory_policy_) - AssignSurfacesAllocationsNonuniform(); - else - AssignSurfacesAllocationsUniform(); + AssignSurfacesAllocations(); // Assign memory allocations to clients that don't have surfaces. AssignNonSurfacesAllocations(); @@ -573,7 +493,7 @@ uint64 GpuMemoryManager::ComputeClientAllocationWhenNonvisible( return 9 * client_state->managed_memory_stats_.bytes_required / 8; } -void GpuMemoryManager::ComputeVisibleSurfacesAllocationsNonuniform() { +void GpuMemoryManager::ComputeVisibleSurfacesAllocations() { uint64 bytes_available_total = GetAvailableGpuMemory(); uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max(); uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); @@ -694,7 +614,7 @@ void GpuMemoryManager::ComputeVisibleSurfacesAllocationsNonuniform() { } } -void GpuMemoryManager::ComputeNonvisibleSurfacesAllocationsNonuniform() { +void GpuMemoryManager::ComputeNonvisibleSurfacesAllocations() { uint64 bytes_allocated_visible = 0; for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); it != clients_visible_mru_.end(); @@ -812,10 +732,10 @@ void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() { } } -void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() { +void GpuMemoryManager::AssignSurfacesAllocations() { // Compute allocation when for all clients. - ComputeVisibleSurfacesAllocationsNonuniform(); - ComputeNonvisibleSurfacesAllocationsNonuniform(); + ComputeVisibleSurfacesAllocations(); + ComputeNonvisibleSurfacesAllocations(); // Distribute the remaining memory to visible clients. DistributeRemainingMemoryToVisibleSurfaces(); @@ -861,85 +781,6 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() { } } -void GpuMemoryManager::AssignSurfacesAllocationsUniform() { - // Determine how much memory to assign to give to visible and nonvisible - // clients. - uint64 bytes_limit_when_visible = GetVisibleClientAllocation(); - - // Experiment to determine if aggressively discarding tiles on OS X - // results in greater stability. -#if defined(OS_MACOSX) - GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave; -#else - GpuMemoryAllocationForRenderer::PriorityCutoff priority_cutoff_when_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything; -#endif - - // Assign memory allocations to visible clients. - for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); - it != clients_visible_mru_.end(); - ++it) { - GpuMemoryManagerClientState* client_state = *it; - GpuMemoryAllocation allocation; - - allocation.browser_allocation.suggest_have_frontbuffer = true; - allocation.renderer_allocation.bytes_limit_when_visible = - bytes_limit_when_visible; - allocation.renderer_allocation.priority_cutoff_when_visible = - priority_cutoff_when_visible; - - // Allow this client to keep its textures when nonvisible if they - // aren't so expensive that they won't fit. 
- if (client_state->managed_memory_stats_.bytes_required <= - bytes_nonvisible_available_gpu_memory_) { - allocation.renderer_allocation.bytes_limit_when_not_visible = - GetCurrentNonvisibleAvailableGpuMemory(); - allocation.renderer_allocation.priority_cutoff_when_not_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; - } else { - allocation.renderer_allocation.bytes_limit_when_not_visible = 0; - allocation.renderer_allocation.priority_cutoff_when_not_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; - } - - client_state->client_->SetMemoryAllocation(allocation); - } - - // Assign memory allocations to nonvisible clients. - uint64 bytes_allocated_nonvisible = 0; - for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); - it != clients_nonvisible_mru_.end(); - ++it) { - GpuMemoryManagerClientState* client_state = *it; - GpuMemoryAllocation allocation; - - allocation.browser_allocation.suggest_have_frontbuffer = - !client_state->hibernated_; - allocation.renderer_allocation.bytes_limit_when_visible = - bytes_limit_when_visible; - allocation.renderer_allocation.priority_cutoff_when_visible = - priority_cutoff_when_visible; - - if (client_state->managed_memory_stats_.bytes_required + - bytes_allocated_nonvisible <= - GetCurrentNonvisibleAvailableGpuMemory()) { - bytes_allocated_nonvisible += - client_state->managed_memory_stats_.bytes_required; - allocation.renderer_allocation.bytes_limit_when_not_visible = - GetCurrentNonvisibleAvailableGpuMemory(); - allocation.renderer_allocation.priority_cutoff_when_not_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowOnlyRequired; - } else { - allocation.renderer_allocation.bytes_limit_when_not_visible = 0; - allocation.renderer_allocation.priority_cutoff_when_not_visible = - GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNothing; - } - - client_state->client_->SetMemoryAllocation(allocation); - } -} - void GpuMemoryManager::AssignNonSurfacesAllocations() { for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); it != clients_nonsurface_.end(); @@ -1000,47 +841,6 @@ void GpuMemoryManager::SetClientsHibernatedState() const { } } -uint64 GpuMemoryManager::GetVisibleClientAllocation() const { - // Count how many clients will get allocations. - size_t clients_with_surface_visible_count = clients_visible_mru_.size(); - size_t clients_without_surface_not_hibernated_count = 0; - for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); - it != clients_nonsurface_.end(); - ++it) { - GpuMemoryManagerClientState* client_state = *it; - if (!client_state->hibernated_) - clients_without_surface_not_hibernated_count++; - } - - // Calculate bonus allocation by splitting remainder of global limit equally - // after giving out the minimum to those that need it. 
- size_t num_clients_need_mem = clients_with_surface_visible_count + - clients_without_surface_not_hibernated_count; - uint64 base_allocation_size = GetMinimumClientAllocation() * - num_clients_need_mem; - uint64 bonus_allocation = 0; - if (base_allocation_size < GetAvailableGpuMemory() && - clients_with_surface_visible_count) - bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) / - clients_with_surface_visible_count; - uint64 clients_allocation_when_visible = GetMinimumClientAllocation() + - bonus_allocation; - - // If we have received a window count message, then override the client-based - // scheme with a per-window scheme - if (window_count_has_been_received_) { - clients_allocation_when_visible = std::max( - clients_allocation_when_visible, - GetAvailableGpuMemory() / std::max(window_count_, 1u)); - } - - // Limit the memory per client to its maximum allowed level. - if (clients_allocation_when_visible >= GetMaximumClientAllocation()) - clients_allocation_when_visible = GetMaximumClientAllocation(); - - return clients_allocation_when_visible; -} - void GpuMemoryManager::SendUmaStatsToBrowser() { if (!channel_manager_) return; @@ -1052,7 +852,6 @@ void GpuMemoryManager::SendUmaStatsToBrowser() { clients_nonvisible_mru_.size() + clients_nonsurface_.size(); params.context_group_count = tracking_groups_.size(); - params.window_count = window_count_; channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); } diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h index 5c3d752..2315c84 100644 --- a/content/common/gpu/gpu_memory_manager.h +++ b/content/common/gpu/gpu_memory_manager.h @@ -55,7 +55,6 @@ class CONTENT_EXPORT GpuMemoryManager : // Retrieve GPU Resource consumption statistics for the task manager void GetVideoMemoryUsageStats( content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const; - void SetWindowCount(uint32 count); GpuMemoryManagerClientState* CreateClientState( GpuMemoryManagerClient* client, bool has_surface, bool visible); @@ -69,8 +68,6 @@ class CONTENT_EXPORT GpuMemoryManager : friend class GpuMemoryManagerClientState; FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, - ComparatorTests); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, TestManageBasicFunctionality); FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, TestManageChangingVisibility); @@ -93,19 +90,12 @@ class CONTENT_EXPORT GpuMemoryManager : FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, TestManagedUsageTracking); FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, - TestBackgroundCutoff); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, - TestBackgroundMru); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, - TestUnmanagedTracking); - - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform, BackgroundMru); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform, + FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, BackgroundDiscardPersistent); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform, + FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, UnmanagedTracking); - FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTestNonuniform, + FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest, DefaultAllocation); typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*> @@ -115,10 +105,7 @@ class CONTENT_EXPORT GpuMemoryManager : void Manage(); void SetClientsHibernatedState() const; - uint64 GetVisibleClientAllocation() const; - uint64 GetCurrentNonvisibleAvailableGpuMemory() const; - void AssignSurfacesAllocationsNonuniform(); - void 
AssignSurfacesAllocationsUniform(); + void AssignSurfacesAllocations(); void AssignNonSurfacesAllocations(); // Math helper function to compute the maximum value of cap such that @@ -126,8 +113,8 @@ class CONTENT_EXPORT GpuMemoryManager : static uint64 ComputeCap(std::vector<uint64> bytes, uint64 bytes_sum_limit); // Compute the allocation for clients when visible and not visible. - void ComputeVisibleSurfacesAllocationsNonuniform(); - void ComputeNonvisibleSurfacesAllocationsNonuniform(); + void ComputeVisibleSurfacesAllocations(); + void ComputeNonvisibleSurfacesAllocations(); void DistributeRemainingMemoryToVisibleSurfaces(); // Compute the budget for a client. Allow at most bytes_above_required_cap @@ -146,7 +133,6 @@ class CONTENT_EXPORT GpuMemoryManager : // on what the stubs' contexts report. void UpdateAvailableGpuMemory(); void UpdateUnmanagedMemoryLimits(); - void UpdateNonvisibleAvailableGpuMemory(); // The amount of video memory which is available for allocation. uint64 GetAvailableGpuMemory() const; @@ -204,9 +190,6 @@ class CONTENT_EXPORT GpuMemoryManager : ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state); // Interfaces for testing - void TestingSetUseNonuniformMemoryPolicy(bool use_nonuniform_memory_policy) { - use_nonuniform_memory_policy_ = use_nonuniform_memory_policy; - } void TestingDisableScheduleManage() { disable_schedule_manage_ = true; } void TestingSetAvailableGpuMemory(uint64 bytes) { bytes_available_gpu_memory_ = bytes; @@ -225,16 +208,8 @@ class CONTENT_EXPORT GpuMemoryManager : bytes_unmanaged_limit_step_ = bytes; } - void TestingSetNonvisibleAvailableGpuMemory(uint64 bytes) { - bytes_nonvisible_available_gpu_memory_ = bytes; - } - GpuChannelManager* channel_manager_; - // The new memory policy does not uniformly assign memory to tabs, but - // scales the assignments to the tabs' needs. - bool use_nonuniform_memory_policy_; - // A list of all visible and nonvisible clients, in most-recently-used // order (most recently used is first). ClientStateList clients_visible_mru_; @@ -259,10 +234,6 @@ class CONTENT_EXPORT GpuMemoryManager : uint64 bytes_minimum_per_client_; uint64 bytes_default_per_client_; - // The maximum amount of memory that can be allocated for GPU resources - // in nonvisible renderers. - uint64 bytes_nonvisible_available_gpu_memory_; - // The current total memory usage, and historical maximum memory usage uint64 bytes_allocated_managed_current_; uint64 bytes_allocated_managed_visible_; @@ -278,12 +249,6 @@ class CONTENT_EXPORT GpuMemoryManager : // Update bytes_allocated_unmanaged_low/high_ in intervals of step_. uint64 bytes_unmanaged_limit_step_; - // The number of browser windows that exist. If we ever receive a - // GpuMsg_SetVideoMemoryWindowCount, then we use this to compute memory - // allocations, instead of doing more complicated stub-based calculations. - bool window_count_has_been_received_; - uint32 window_count_; - // Used to disable automatic changes to Manage() in testing. 
bool disable_schedule_manage_; diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc index 1661e89..9cbb4b3 100644 --- a/content/common/gpu/gpu_memory_manager_unittest.cc +++ b/content/common/gpu/gpu_memory_manager_unittest.cc @@ -164,7 +164,6 @@ class GpuMemoryManagerTest : public testing::Test { GpuMemoryManagerTest() : memmgr_(0, kFrontbufferLimitForTest) { memmgr_.TestingDisableScheduleManage(); - memmgr_.TestingSetUseNonuniformMemoryPolicy(false); } virtual void SetUp() { @@ -178,23 +177,17 @@ class GpuMemoryManagerTest : public testing::Test { bool IsAllocationForegroundForSurfaceYes( const GpuMemoryAllocation& alloc) { return alloc.browser_allocation.suggest_have_frontbuffer && - !alloc.renderer_allocation.have_backbuffer_when_not_visible && - alloc.renderer_allocation.bytes_limit_when_visible >= - GetMinimumClientAllocation(); + !alloc.renderer_allocation.have_backbuffer_when_not_visible; } bool IsAllocationBackgroundForSurfaceYes( const GpuMemoryAllocation& alloc) { return alloc.browser_allocation.suggest_have_frontbuffer && - !alloc.renderer_allocation.have_backbuffer_when_not_visible && - alloc.renderer_allocation.bytes_limit_when_not_visible <= - memmgr_.GetCurrentNonvisibleAvailableGpuMemory(); + !alloc.renderer_allocation.have_backbuffer_when_not_visible; } bool IsAllocationHibernatedForSurfaceYes( const GpuMemoryAllocation& alloc) { return !alloc.browser_allocation.suggest_have_frontbuffer && - !alloc.renderer_allocation.have_backbuffer_when_not_visible && - alloc.renderer_allocation.bytes_limit_when_not_visible <= - memmgr_.GetCurrentNonvisibleAvailableGpuMemory(); + !alloc.renderer_allocation.have_backbuffer_when_not_visible; } bool IsAllocationForegroundForSurfaceNo( const GpuMemoryAllocation& alloc) { @@ -244,11 +237,6 @@ class GpuMemoryManagerTest : public testing::Test { return memmgr_.GetMinimumClientAllocation(); } - GpuMemoryManager memmgr_; -}; - -class GpuMemoryManagerTestNonuniform : public GpuMemoryManagerTest { - protected: void SetClientStats( FakeClient* client, uint64 required, @@ -256,6 +244,8 @@ class GpuMemoryManagerTestNonuniform : public GpuMemoryManagerTest { client->SetManagedMemoryStats( GpuManagedMemoryStats(required, nicetohave, 0, false)); } + + GpuMemoryManager memmgr_; }; // Test GpuMemoryManager::Manage basic functionality. @@ -717,171 +707,12 @@ TEST_F(GpuMemoryManagerTest, TestManagedUsageTracking) { EXPECT_EQ(4ul, memmgr_.bytes_allocated_managed_nonvisible_); } -// Test GpuMemoryManager's background cutoff threshoulds -TEST_F(GpuMemoryManagerTest, TestBackgroundCutoff) { - memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); - - FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true); - - // stub1's requirements are not <=16, so it should just dump - // everything when it goes invisible. - stub1.SetManagedMemoryStats(GpuManagedMemoryStats(17, 24, 18, false)); - Manage(); - EXPECT_EQ(0ul, stub1.BytesWhenNotVisible()); - - // stub1 now fits, so it should have a full budget. - stub1.SetManagedMemoryStats(GpuManagedMemoryStats(16, 24, 18, false)); - Manage(); - EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_, - memmgr_.GetCurrentNonvisibleAvailableGpuMemory()); - EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(), - stub1.BytesWhenNotVisible()); - - // Background stub1. 
- stub1.SetManagedMemoryStats(GpuManagedMemoryStats(16, 24, 16, false)); - stub1.SetVisible(false); - - // Add stub2 that uses almost enough memory to evict - // stub1, but not quite. - FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true); - stub2.SetManagedMemoryStats(GpuManagedMemoryStats(16, 50, 48, false)); - Manage(); - EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_, - memmgr_.GetCurrentNonvisibleAvailableGpuMemory()); - EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(), - stub1.BytesWhenNotVisible()); - EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(), - stub2.BytesWhenNotVisible()); - - // Increase stub2 to force stub1 to be evicted. - stub2.SetManagedMemoryStats(GpuManagedMemoryStats(16, 50, 49, false)); - Manage(); - EXPECT_EQ(0ul, - stub1.BytesWhenNotVisible()); - EXPECT_EQ(memmgr_.GetCurrentNonvisibleAvailableGpuMemory(), - stub2.BytesWhenNotVisible()); -} - -// Test GpuMemoryManager's background MRU behavior -TEST_F(GpuMemoryManagerTest, TestBackgroundMru) { - memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); - - uint64 bytes_when_not_visible_expected = - memmgr_.GetCurrentNonvisibleAvailableGpuMemory(); -#if defined (OS_ANDROID) - bytes_when_not_visible_expected = 0; -#endif - - FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true); - FakeClient stub2(&memmgr_, GenerateUniqueSurfaceId(), true); - FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true); - - // When all are visible, they should all be allowed to have memory - // should they become nonvisible. - stub1.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false)); - stub2.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false)); - stub3.SetManagedMemoryStats(GpuManagedMemoryStats(7, 24, 7, false)); - Manage(); - EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_, - memmgr_.GetCurrentNonvisibleAvailableGpuMemory()); - EXPECT_EQ(stub1.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - EXPECT_EQ(stub2.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - EXPECT_EQ(stub3.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - - // Background stubs 1 and 2, and they should fit - stub2.SetVisible(false); - stub1.SetVisible(false); - Manage(); - EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_, - bytes_when_not_visible_expected); - EXPECT_EQ(stub1.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - EXPECT_EQ(stub2.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - EXPECT_EQ(stub3.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - - // Now background stub 3, and it should cause stub 2 to be - // evicted because it was set non-visible first - stub3.SetVisible(false); - Manage(); - EXPECT_EQ(memmgr_.bytes_nonvisible_available_gpu_memory_, - memmgr_.GetCurrentNonvisibleAvailableGpuMemory()); - EXPECT_EQ(stub1.BytesWhenNotVisible(), - bytes_when_not_visible_expected); - EXPECT_EQ(stub2.BytesWhenNotVisible(), - 0ul); - EXPECT_EQ(stub3.BytesWhenNotVisible(), - bytes_when_not_visible_expected); -} - -// Test GpuMemoryManager's tracking of unmanaged (e.g, WebGL) memory. -TEST_F(GpuMemoryManagerTest, TestUnmanagedTracking) { - memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); - memmgr_.TestingSetUnmanagedLimitStep(16); - memmgr_.TestingSetMinimumClientAllocation(8); - - FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true); - - // Expect that the one stub get the maximum tab allocation. 
- Manage(); - EXPECT_EQ(memmgr_.GetMaximumClientAllocation(), - stub1.BytesWhenVisible()); - - // Now allocate some unmanaged memory and make sure the amount - // goes down. - memmgr_.TrackMemoryAllocatedChange( - stub1.tracking_group_.get(), - 0, - 48, - gpu::gles2::MemoryTracker::kUnmanaged); - Manage(); - EXPECT_GT(memmgr_.GetMaximumClientAllocation(), - stub1.BytesWhenVisible()); - - // Now allocate the entire FB worth of unmanaged memory, and - // make sure that we stay stuck at the minimum tab allocation. - memmgr_.TrackMemoryAllocatedChange( - stub1.tracking_group_.get(), - 48, - 64, - gpu::gles2::MemoryTracker::kUnmanaged); - Manage(); - EXPECT_EQ(memmgr_.GetMinimumClientAllocation(), - stub1.BytesWhenVisible()); - - // Far-oversubscribe the entire FB, and make sure we stay at - // the minimum allocation, and don't blow up. - memmgr_.TrackMemoryAllocatedChange( - stub1.tracking_group_.get(), - 64, - 999, - gpu::gles2::MemoryTracker::kUnmanaged); - Manage(); - EXPECT_EQ(memmgr_.GetMinimumClientAllocation(), - stub1.BytesWhenVisible()); - - // Delete all tracked memory so we don't hit leak checks. - memmgr_.TrackMemoryAllocatedChange( - stub1.tracking_group_.get(), - 999, - 0, - gpu::gles2::MemoryTracker::kUnmanaged); -} - // Test nonvisible MRU behavior (the most recently used nonvisible clients // keep their contents). -TEST_F(GpuMemoryManagerTestNonuniform, BackgroundMru) { - // Set memory manager constants for this test - memmgr_.TestingSetUseNonuniformMemoryPolicy(true); +TEST_F(GpuMemoryManagerTest, BackgroundMru) { + // Set memory manager constants for this test. Note that the budget + // for backgrounded content will be 64/4 = 16. memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); memmgr_.TestingSetMinimumClientAllocation(8); uint64 bytes_when_not_visible_expected = 6u; @@ -934,11 +765,10 @@ TEST_F(GpuMemoryManagerTestNonuniform, BackgroundMru) { // Test that once a backgrounded client has dropped its resources, it // doesn't get them back until it becomes visible again. -TEST_F(GpuMemoryManagerTestNonuniform, BackgroundDiscardPersistent) { - // Set memory manager constants for this test - memmgr_.TestingSetUseNonuniformMemoryPolicy(true); +TEST_F(GpuMemoryManagerTest, BackgroundDiscardPersistent) { + // Set memory manager constants for this test. Note that the budget + // for backgrounded content will be 64/4 = 16. memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); memmgr_.TestingSetMinimumClientAllocation(8); uint64 bytes_when_not_visible_expected = 10ul; @@ -976,11 +806,9 @@ TEST_F(GpuMemoryManagerTestNonuniform, BackgroundDiscardPersistent) { } // Test tracking of unmanaged (e.g, WebGL) memory. -TEST_F(GpuMemoryManagerTestNonuniform, UnmanagedTracking) { +TEST_F(GpuMemoryManagerTest, UnmanagedTracking) { // Set memory manager constants for this test - memmgr_.TestingSetUseNonuniformMemoryPolicy(true); memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); memmgr_.TestingSetMinimumClientAllocation(8); memmgr_.TestingSetUnmanagedLimitStep(16); @@ -1030,11 +858,9 @@ TEST_F(GpuMemoryManagerTestNonuniform, UnmanagedTracking) { } // Test the default allocation levels are used. 
-TEST_F(GpuMemoryManagerTestNonuniform, DefaultAllocation) { +TEST_F(GpuMemoryManagerTest, DefaultAllocation) { // Set memory manager constants for this test - memmgr_.TestingSetUseNonuniformMemoryPolicy(true); memmgr_.TestingSetAvailableGpuMemory(64); - memmgr_.TestingSetNonvisibleAvailableGpuMemory(16); memmgr_.TestingSetMinimumClientAllocation(8); memmgr_.TestingSetDefaultClientAllocation(16); diff --git a/content/common/gpu/gpu_memory_uma_stats.h b/content/common/gpu/gpu_memory_uma_stats.h index dbea5c2..8f67aeb 100644 --- a/content/common/gpu/gpu_memory_uma_stats.h +++ b/content/common/gpu/gpu_memory_uma_stats.h @@ -17,8 +17,7 @@ struct GPUMemoryUmaStats { bytes_allocated_max(0), bytes_limit(0), client_count(0), - context_group_count(0), - window_count(0) { + context_group_count(0) { } // The number of bytes currently allocated. diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h index 4cf7613..0c1ccdb 100644 --- a/content/common/gpu/gpu_messages.h +++ b/content/common/gpu/gpu_messages.h @@ -173,7 +173,6 @@ IPC_STRUCT_TRAITS_BEGIN(content::GPUMemoryUmaStats) IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_current) IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_max) IPC_STRUCT_TRAITS_MEMBER(bytes_limit) - IPC_STRUCT_TRAITS_MEMBER(window_count) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(content::GpuMemoryAllocationForRenderer) @@ -269,11 +268,6 @@ IPC_MESSAGE_CONTROL0(GpuMsg_CollectGraphicsInfo) // Tells the GPU process to report video_memory information for the task manager IPC_MESSAGE_CONTROL0(GpuMsg_GetVideoMemoryUsageStats) -// Tells the GPU process' memory manager how many visible windows there are, so -// it can partition memory amongst them. -IPC_MESSAGE_CONTROL1(GpuMsg_SetVideoMemoryWindowCount, - uint32 /* window_count */) - // Tells the GPU process that the browser process has finished resizing the // view. IPC_MESSAGE_ROUTED0(AcceleratedSurfaceMsg_ResizeViewACK) diff --git a/content/gpu/gpu_child_thread.cc b/content/gpu/gpu_child_thread.cc index 8a44f4c..b8ce0a9 100644 --- a/content/gpu/gpu_child_thread.cc +++ b/content/gpu/gpu_child_thread.cc @@ -105,8 +105,6 @@ bool GpuChildThread::OnControlMessageReceived(const IPC::Message& msg) { IPC_MESSAGE_HANDLER(GpuMsg_CollectGraphicsInfo, OnCollectGraphicsInfo) IPC_MESSAGE_HANDLER(GpuMsg_GetVideoMemoryUsageStats, OnGetVideoMemoryUsageStats) - IPC_MESSAGE_HANDLER(GpuMsg_SetVideoMemoryWindowCount, - OnSetVideoMemoryWindowCount) IPC_MESSAGE_HANDLER(GpuMsg_Clean, OnClean) IPC_MESSAGE_HANDLER(GpuMsg_Crash, OnCrash) IPC_MESSAGE_HANDLER(GpuMsg_Hang, OnHang) @@ -205,11 +203,6 @@ void GpuChildThread::OnGetVideoMemoryUsageStats() { Send(new GpuHostMsg_VideoMemoryUsageStats(video_memory_usage_stats)); } -void GpuChildThread::OnSetVideoMemoryWindowCount(uint32 window_count) { - if (gpu_channel_manager_.get()) - gpu_channel_manager_->gpu_memory_manager()->SetWindowCount(window_count); -} - void GpuChildThread::OnClean() { VLOG(1) << "GPU: Removing all contexts"; if (gpu_channel_manager_.get()) diff --git a/content/public/browser/gpu_data_manager.h b/content/public/browser/gpu_data_manager.h index 4490917..8acdda3 100644 --- a/content/public/browser/gpu_data_manager.h +++ b/content/public/browser/gpu_data_manager.h @@ -73,11 +73,6 @@ class GpuDataManager { virtual void AddObserver(GpuDataManagerObserver* observer) = 0; virtual void RemoveObserver(GpuDataManagerObserver* observer) = 0; - // Notifies the gpu process about the number of browser windows, so - // they can be used to determine managed memory allocation. 
- virtual void SetWindowCount(uint32 count) = 0; - virtual uint32 GetWindowCount() const = 0; - // Allows a given domain previously blocked from accessing 3D APIs // to access them again. virtual void UnblockDomainFrom3DAPIs(const GURL& url) = 0; diff --git a/gpu/command_buffer/service/gpu_switches.cc b/gpu/command_buffer/service/gpu_switches.cc index aebaf8b..d029cfb 100644 --- a/gpu/command_buffer/service/gpu_switches.cc +++ b/gpu/command_buffer/service/gpu_switches.cc @@ -45,11 +45,6 @@ const char kEnforceGLMinimums[] = "enforce-gl-minimums"; // affected systems. const char kForceGLFinishWorkaround[] = "force-glfinish-workaround"; -// Disable the nonuniform GPU memory memory and instead use the scheme that -// distributes memory uniformly to all visible renderers. -const char kDisableNonuniformGpuMemPolicy[] = - "disable-nonuniform-gpu-mem-policy"; - // Sets the total amount of memory that may be allocated for GPU resources const char kForceGpuMemAvailableMb[] = "force-gpu-mem-available-mb"; @@ -70,7 +65,6 @@ const char* kGpuSwitches[] = { kDisableGpuProgramCache, kEnforceGLMinimums, kForceGLFinishWorkaround, - kDisableNonuniformGpuMemPolicy, kForceGpuMemAvailableMb, kGpuProgramCacheSizeKb, kTraceGL, diff --git a/gpu/command_buffer/service/gpu_switches.h b/gpu/command_buffer/service/gpu_switches.h index 1314854..0a51e70 100644 --- a/gpu/command_buffer/service/gpu_switches.h +++ b/gpu/command_buffer/service/gpu_switches.h @@ -22,7 +22,6 @@ GPU_EXPORT extern const char kEnableGPUServiceLoggingGPU[]; GPU_EXPORT extern const char kDisableGpuProgramCache[]; GPU_EXPORT extern const char kEnforceGLMinimums[]; GPU_EXPORT extern const char kForceGLFinishWorkaround[]; -GPU_EXPORT extern const char kDisableNonuniformGpuMemPolicy[]; GPU_EXPORT extern const char kForceGpuMemAvailableMb[]; GPU_EXPORT extern const char kGpuProgramCacheSizeKb[]; GPU_EXPORT extern const char kTraceGL[]; |