author     sohan.jyoti <sohan.jyoti@samsung.com>  2015-11-16 18:47:39 -0800
committer  Commit bot <commit-bot@chromium.org>   2015-11-17 02:48:39 +0000
commit     d1650935866e4efc94cbb5d4ce89f0af97cc4da9 (patch)
tree       796476ca74f0350d4ac97632ea84355c1503cbc2
parent     2c5335508648c13f6a4e828ccb69c557bdfc20f9 (diff)
Cleanup GpuMemoryManager and helpers.
Now that GPU memory calculation has moved to the compositor, remove the now-unused code. This deletes GpuMemoryManagerClient and its related unit tests.

BUG=537563
Review URL: https://codereview.chromium.org/1420533009
Cr-Commit-Position: refs/heads/master@{#359984}
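For orientation, this is roughly the slimmed-down GpuMemoryManager interface left after the patch, condensed from the content/common/gpu/gpu_memory_manager.h diff below (a sketch, not a verbatim copy; friend declarations and the tracking-group hooks are omitted):

class CONTENT_EXPORT GpuMemoryManager
    : public base::SupportsWeakPtr<GpuMemoryManager> {
 public:
  // The surfaces-with-frontbuffer soft limit is gone; only the channel
  // manager is needed now.
  explicit GpuMemoryManager(GpuChannelManager* channel_manager);
  ~GpuMemoryManager();

  // Retrieve GPU resource consumption statistics for the task manager.
  void GetVideoMemoryUsageStats(
      content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;

  GpuMemoryTrackingGroup* CreateTrackingGroup(
      base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
  uint64 GetTrackerMemoryUsage(gpu::gles2::MemoryTracker* tracker) const;

 private:
  // Send memory usage stats to the browser process.
  void SendUmaStatsToBrowser();

  GpuChannelManager* channel_manager_;
  TrackingGroupMap tracking_groups_;       // All context groups' tracking structures.
  uint64 bytes_allocated_current_;         // Current total memory usage.
  uint64 bytes_allocated_historical_max_;  // Historical maximum memory usage.
};

Correspondingly, GpuChannelManager now constructs the manager as gpu_memory_manager_(this); the kDefaultMaxSurfacesWithFrontbufferSoftLimit argument no longer exists.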
-rw-r--r--  content/browser/gpu/gpu_process_host.cc              5
-rw-r--r--  content/common/gpu/gpu_channel_manager.cc             4
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc        29
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h         11
-rw-r--r--  content/common/gpu/gpu_memory_manager.cc            162
-rw-r--r--  content/common/gpu/gpu_memory_manager.h             100
-rw-r--r--  content/common/gpu/gpu_memory_manager_client.cc      34
-rw-r--r--  content/common/gpu/gpu_memory_manager_client.h       86
-rw-r--r--  content/common/gpu/gpu_memory_manager_unittest.cc   189
-rw-r--r--  content/common/gpu/gpu_memory_uma_stats.h            11
-rw-r--r--  content/common/gpu/gpu_messages.h                     1
-rw-r--r--  content/content_common.gypi                           2
-rw-r--r--  content/content_tests.gypi                            1
-rw-r--r--  gpu/command_buffer/service/gl_context_virtual.cc      4
-rw-r--r--  gpu/command_buffer/service/gl_context_virtual.h       1
-rw-r--r--  ui/gl/gl_context.cc                                   6
-rw-r--r--  ui/gl/gl_context.h                                    5
-rw-r--r--  ui/gl/gl_context_android.cc                          52
-rw-r--r--  ui/gl/gl_context_cgl.cc                              53
-rw-r--r--  ui/gl/gl_context_cgl.h                                1
-rw-r--r--  ui/gl/gl_context_egl.cc                               8
-rw-r--r--  ui/gl/gl_context_egl.h                                1
-rw-r--r--  ui/gl/gl_context_glx.cc                              13
-rw-r--r--  ui/gl/gl_context_glx.h                                1
24 files changed, 7 insertions(+), 773 deletions(-)
diff --git a/content/browser/gpu/gpu_process_host.cc b/content/browser/gpu/gpu_process_host.cc
index 7cc4525..ba1f793 100644
--- a/content/browser/gpu/gpu_process_host.cc
+++ b/content/browser/gpu/gpu_process_host.cc
@@ -465,8 +465,6 @@ GpuProcessHost::~GpuProcessHost() {
uma_memory_stats_received_);
if (uma_memory_stats_received_) {
- UMA_HISTOGRAM_COUNTS_100("GPU.AtExitManagedMemoryClientCount",
- uma_memory_stats_.client_count);
UMA_HISTOGRAM_COUNTS_100("GPU.AtExitContextGroupCount",
uma_memory_stats_.context_group_count);
UMA_HISTOGRAM_CUSTOM_COUNTS(
@@ -475,9 +473,6 @@ GpuProcessHost::~GpuProcessHost() {
UMA_HISTOGRAM_CUSTOM_COUNTS(
"GPU.AtExitMBytesAllocatedMax",
uma_memory_stats_.bytes_allocated_max / 1024 / 1024, 1, 2000, 50);
- UMA_HISTOGRAM_CUSTOM_COUNTS(
- "GPU.AtExitMBytesLimit",
- uma_memory_stats_.bytes_limit / 1024 / 1024, 1, 2000, 50);
}
std::string message;
diff --git a/content/common/gpu/gpu_channel_manager.cc b/content/common/gpu/gpu_channel_manager.cc
index 3f603a6..60dbd9c 100644
--- a/content/common/gpu/gpu_channel_manager.cc
+++ b/content/common/gpu/gpu_channel_manager.cc
@@ -56,9 +56,7 @@ GpuChannelManager::GpuChannelManager(
shutdown_event_(shutdown_event),
share_group_(new gfx::GLShareGroup),
mailbox_manager_(gpu::gles2::MailboxManager::Create()),
- gpu_memory_manager_(
- this,
- GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit),
+ gpu_memory_manager_(this),
sync_point_manager_(sync_point_manager),
sync_point_client_waiter_(new gpu::SyncPointClientWaiter),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 595b02f..39a75d2 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -149,10 +149,6 @@ DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
res->SetInteger("renderer_pid", channel->GetClientPID());
res->SetDouble("used_bytes", channel->GetMemoryUsage());
- res->SetDouble("limit_bytes",
- channel->gpu_channel_manager()
- ->gpu_memory_manager()
- ->GetMaximumClientAllocation());
return new DevToolsChannelData(res.release());
}
@@ -209,8 +205,7 @@ GpuCommandBufferStub::GpuCommandBufferStub(
waiting_for_sync_point_(false),
previous_processed_num_(0),
preemption_flag_(preempt_by_flag),
- active_url_(active_url),
- total_gpu_memory_(0) {
+ active_url_(active_url) {
active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
FastSetActiveURL(active_url_, active_url_hash_);
@@ -615,9 +610,6 @@ void GpuCommandBufferStub::OnInitialize(
new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
}
- if (!context->GetTotalGpuMemory(&total_gpu_memory_))
- total_gpu_memory_ = 0;
-
if (!context_group_->has_program_cache() &&
!context_group_->feature_info()->workarounds().disable_program_cache) {
context_group_->set_program_cache(
@@ -1203,17 +1195,6 @@ void GpuCommandBufferStub::RemoveDestructionObserver(
destruction_observers_.RemoveObserver(observer);
}
-bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
- *bytes = total_gpu_memory_;
- return !!total_gpu_memory_;
-}
-
-gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
- if (!surface_.get())
- return gfx::Size();
- return surface_->GetSize();
-}
-
const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
return context_group_->feature_info();
}
@@ -1222,14 +1203,6 @@ gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
return context_group_->memory_tracker();
}
-void GpuCommandBufferStub::SuggestHaveFrontBuffer(
- bool suggest_have_frontbuffer) {
- // This can be called outside of OnMessageReceived, so the context needs
- // to be made current before calling methods on the surface.
- if (surface_.get() && MakeCurrent())
- surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
-}
-
bool GpuCommandBufferStub::CheckContextLost() {
DCHECK(command_buffer_);
gpu::CommandBuffer::State state = command_buffer_->GetLastState();
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index 0537c30..7c45aeb 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -14,7 +14,6 @@
#include "base/time/time.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_manager.h"
-#include "content/common/gpu/gpu_memory_manager_client.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
@@ -55,8 +54,7 @@ class GpuWatchdog;
struct WaitForCommandState;
class GpuCommandBufferStub
- : public GpuMemoryManagerClient,
- public IPC::Listener,
+ : public IPC::Listener,
public IPC::Sender,
public base::SupportsWeakPtr<GpuCommandBufferStub> {
public:
@@ -100,11 +98,7 @@ class GpuCommandBufferStub
// IPC::Sender implementation:
bool Send(IPC::Message* msg) override;
- // GpuMemoryManagerClient implementation:
- gfx::Size GetSurfaceSize() const override;
- gpu::gles2::MemoryTracker* GetMemoryTracker() const override;
- void SuggestHaveFrontBuffer(bool suggest_have_frontbuffer) override;
- bool GetTotalGpuMemory(uint64* bytes) override;
+ gpu::gles2::MemoryTracker* GetMemoryTracker() const;
// Whether this command buffer can currently handle IPC messages.
bool IsScheduled();
@@ -299,7 +293,6 @@ class GpuCommandBufferStub
GURL active_url_;
size_t active_url_hash_;
- size_t total_gpu_memory_;
scoped_ptr<WaitForCommandState> wait_for_token_;
scoped_ptr<WaitForCommandState> wait_for_get_offset_;
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index 5f21fec..3dae2b1 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -13,7 +13,6 @@
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
-#include "content/common/gpu/gpu_memory_manager_client.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_memory_uma_stats.h"
#include "content/common/gpu/gpu_messages.h"
@@ -25,8 +24,6 @@ using gpu::MemoryAllocation;
namespace content {
namespace {
-const int kDelayedScheduleManageTimeoutMs = 67;
-
const uint64 kBytesAllocatedStep = 16 * 1024 * 1024;
void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
@@ -36,50 +33,16 @@ void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
}
-GpuMemoryManager::GpuMemoryManager(
- GpuChannelManager* channel_manager,
- uint64 max_surfaces_with_frontbuffer_soft_limit)
+GpuMemoryManager::GpuMemoryManager(GpuChannelManager* channel_manager)
: channel_manager_(channel_manager),
- manage_immediate_scheduled_(false),
- disable_schedule_manage_(false),
- max_surfaces_with_frontbuffer_soft_limit_(
- max_surfaces_with_frontbuffer_soft_limit),
- client_hard_limit_bytes_(0),
bytes_allocated_current_(0),
- bytes_allocated_historical_max_(0)
-{ }
+ bytes_allocated_historical_max_(0) {}
GpuMemoryManager::~GpuMemoryManager() {
DCHECK(tracking_groups_.empty());
- DCHECK(clients_visible_mru_.empty());
- DCHECK(clients_nonvisible_mru_.empty());
- DCHECK(clients_nonsurface_.empty());
DCHECK(!bytes_allocated_current_);
}
-void GpuMemoryManager::ScheduleManage(
- ScheduleManageTime schedule_manage_time) {
- if (disable_schedule_manage_)
- return;
- if (manage_immediate_scheduled_)
- return;
- if (schedule_manage_time == kScheduleManageNow) {
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
- manage_immediate_scheduled_ = true;
- if (!delayed_manage_callback_.IsCancelled())
- delayed_manage_callback_.Cancel();
- } else {
- if (!delayed_manage_callback_.IsCancelled())
- return;
- delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage,
- AsWeakPtr()));
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, delayed_manage_callback_.callback(),
- base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs));
- }
-}
-
void GpuMemoryManager::TrackMemoryAllocatedChange(
GpuMemoryTrackingGroup* tracking_group,
uint64 old_size,
@@ -101,40 +64,6 @@ bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
return true;
}
-GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState(
- GpuMemoryManagerClient* client,
- bool has_surface,
- bool visible) {
- TrackingGroupMap::iterator tracking_group_it =
- tracking_groups_.find(client->GetMemoryTracker());
- DCHECK(tracking_group_it != tracking_groups_.end());
- GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second;
-
- GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState(
- this, client, tracking_group, has_surface, visible);
- AddClientToList(client_state);
- ScheduleManage(kScheduleManageNow);
- return client_state;
-}
-
-void GpuMemoryManager::OnDestroyClientState(
- GpuMemoryManagerClientState* client_state) {
- RemoveClientFromList(client_state);
- ScheduleManage(kScheduleManageLater);
-}
-
-void GpuMemoryManager::SetClientStateVisible(
- GpuMemoryManagerClientState* client_state, bool visible) {
- DCHECK(client_state->has_surface_);
- if (client_state->visible_ == visible)
- return;
-
- RemoveClientFromList(client_state);
- client_state->visible_ = visible;
- AddClientToList(client_state);
- ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
-}
-
uint64 GpuMemoryManager::GetTrackerMemoryUsage(
gpu::gles2::MemoryTracker* tracker) const {
TrackingGroupMap::const_iterator tracking_group_it =
@@ -181,100 +110,13 @@ void GpuMemoryManager::GetVideoMemoryUsageStats(
bytes_allocated_historical_max_;
}
-void GpuMemoryManager::Manage() {
- manage_immediate_scheduled_ = false;
- delayed_manage_callback_.Cancel();
-
- // Determine which clients are "hibernated" (which determines the
- // distribution of frontbuffers and memory among clients that don't have
- // surfaces).
- SetClientsHibernatedState();
-
- SendUmaStatsToBrowser();
-}
-
-void GpuMemoryManager::SetClientsHibernatedState() const {
- // Re-set all tracking groups as being hibernated.
- for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
- it != tracking_groups_.end();
- ++it) {
- GpuMemoryTrackingGroup* tracking_group = it->second;
- tracking_group->hibernated_ = true;
- }
- // All clients with surfaces that are visible are non-hibernated.
- uint64 non_hibernated_clients = 0;
- for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
- it != clients_visible_mru_.end();
- ++it) {
- GpuMemoryManagerClientState* client_state = *it;
- client_state->hibernated_ = false;
- client_state->tracking_group_->hibernated_ = false;
- non_hibernated_clients++;
- }
- // Then an additional few clients with surfaces are non-hibernated too, up to
- // a fixed limit.
- for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
- it != clients_nonvisible_mru_.end();
- ++it) {
- GpuMemoryManagerClientState* client_state = *it;
- if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) {
- client_state->hibernated_ = false;
- client_state->tracking_group_->hibernated_ = false;
- non_hibernated_clients++;
- } else {
- client_state->hibernated_ = true;
- }
- }
- // Clients that don't have surfaces are non-hibernated if they are
- // in a GL share group with a non-hibernated surface.
- for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
- it != clients_nonsurface_.end();
- ++it) {
- GpuMemoryManagerClientState* client_state = *it;
- client_state->hibernated_ = client_state->tracking_group_->hibernated_;
- }
-}
-
void GpuMemoryManager::SendUmaStatsToBrowser() {
if (!channel_manager_)
return;
GPUMemoryUmaStats params;
params.bytes_allocated_current = GetCurrentUsage();
params.bytes_allocated_max = bytes_allocated_historical_max_;
- params.bytes_limit = client_hard_limit_bytes_;
- params.client_count = clients_visible_mru_.size() +
- clients_nonvisible_mru_.size() +
- clients_nonsurface_.size();
params.context_group_count = tracking_groups_.size();
channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
}
-
-GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
- GpuMemoryManagerClientState* client_state) {
- if (client_state->has_surface_) {
- if (client_state->visible_)
- return &clients_visible_mru_;
- else
- return &clients_nonvisible_mru_;
- }
- return &clients_nonsurface_;
-}
-
-void GpuMemoryManager::AddClientToList(
- GpuMemoryManagerClientState* client_state) {
- DCHECK(!client_state->list_iterator_valid_);
- ClientStateList* client_list = GetClientList(client_state);
- client_state->list_iterator_ = client_list->insert(
- client_list->begin(), client_state);
- client_state->list_iterator_valid_ = true;
-}
-
-void GpuMemoryManager::RemoveClientFromList(
- GpuMemoryManagerClientState* client_state) {
- DCHECK(client_state->list_iterator_valid_);
- ClientStateList* client_list = GetClientList(client_state);
- client_list->erase(client_state->list_iterator_);
- client_state->list_iterator_valid_ = false;
-}
-
} // namespace content
diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h
index cdfd30d..a5f7655 100644
--- a/content/common/gpu/gpu_memory_manager.h
+++ b/content/common/gpu/gpu_memory_manager.h
@@ -21,98 +21,31 @@
namespace content {
class GpuChannelManager;
-class GpuMemoryManagerClient;
-class GpuMemoryManagerClientState;
class GpuMemoryTrackingGroup;
class CONTENT_EXPORT GpuMemoryManager :
public base::SupportsWeakPtr<GpuMemoryManager> {
public:
-#if defined(OS_ANDROID) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
- enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 1 };
-#else
- enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
-#endif
- enum ScheduleManageTime {
- // Add a call to Manage to the thread's message loop immediately.
- kScheduleManageNow,
- // Add a Manage call to the thread's message loop for execution 1/60th of
- // of a second from now.
- kScheduleManageLater,
- };
-
- GpuMemoryManager(GpuChannelManager* channel_manager,
- uint64 max_surfaces_with_frontbuffer_soft_limit);
+ explicit GpuMemoryManager(GpuChannelManager* channel_manager);
~GpuMemoryManager();
- // Schedule a Manage() call. If immediate is true, we PostTask without delay.
- // Otherwise PostDelayedTask using a CancelableClosure and allow multiple
- // delayed calls to "queue" up. This way, we do not spam clients in certain
- // lower priority situations. An immediate schedule manage will cancel any
- // queued delayed manage.
- void ScheduleManage(ScheduleManageTime schedule_manage_time);
-
// Retrieve GPU Resource consumption statistics for the task manager
void GetVideoMemoryUsageStats(
content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;
- GpuMemoryManagerClientState* CreateClientState(
- GpuMemoryManagerClient* client, bool has_surface, bool visible);
-
GpuMemoryTrackingGroup* CreateTrackingGroup(
base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
uint64 GetTrackerMemoryUsage(gpu::gles2::MemoryTracker* tracker) const;
- uint64 GetMaximumClientAllocation() const {
- return client_hard_limit_bytes_;
- }
private:
friend class GpuMemoryManagerTest;
friend class GpuMemoryTrackingGroup;
friend class GpuMemoryManagerClientState;
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageBasicFunctionality);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageChangingVisibility);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageManyVisibleStubs);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageManyNotVisibleStubs);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageChangingLastUsedTime);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManageChangingImportanceShareGroup);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestForegroundStubsGetBonusAllocation);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestUpdateAvailableGpuMemory);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- GpuMemoryAllocationCompareTests);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- StubMemoryStatsForLastManageTests);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- TestManagedUsageTracking);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- BackgroundMru);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- AllowNonvisibleMemory);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- BackgroundDiscardPersistent);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- UnmanagedTracking);
- FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
- DefaultAllocation);
-
typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
TrackingGroupMap;
- typedef std::list<GpuMemoryManagerClientState*> ClientStateList;
-
- void Manage();
- void SetClientsHibernatedState() const;
-
// Send memory usage stats to the browser process.
void SendUmaStatsToBrowser();
@@ -129,42 +62,11 @@ class CONTENT_EXPORT GpuMemoryManager :
void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
bool EnsureGPUMemoryAvailable(uint64 size_needed);
- // GpuMemoryManagerClientState interface
- void SetClientStateVisible(
- GpuMemoryManagerClientState* client_state, bool visible);
- void OnDestroyClientState(GpuMemoryManagerClientState* client);
-
- // Add or remove a client from its clients list (visible, nonvisible, or
- // nonsurface). When adding the client, add it to the front of the list.
- void AddClientToList(GpuMemoryManagerClientState* client_state);
- void RemoveClientFromList(GpuMemoryManagerClientState* client_state);
- ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);
-
- // Interfaces for testing
- void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
-
GpuChannelManager* channel_manager_;
- // A list of all visible and nonvisible clients, in most-recently-used
- // order (most recently used is first).
- ClientStateList clients_visible_mru_;
- ClientStateList clients_nonvisible_mru_;
-
- // A list of all clients that don't have a surface.
- ClientStateList clients_nonsurface_;
-
// All context groups' tracking structures
TrackingGroupMap tracking_groups_;
- base::CancelableClosure delayed_manage_callback_;
- bool manage_immediate_scheduled_;
- bool disable_schedule_manage_;
-
- uint64 max_surfaces_with_frontbuffer_soft_limit_;
-
- // The maximum amount of memory that may be allocated for a single client.
- uint64 client_hard_limit_bytes_;
-
// The current total memory usage, and historical maximum memory usage
uint64 bytes_allocated_current_;
uint64 bytes_allocated_historical_max_;
diff --git a/content/common/gpu/gpu_memory_manager_client.cc b/content/common/gpu/gpu_memory_manager_client.cc
deleted file mode 100644
index 30d5434..0000000
--- a/content/common/gpu/gpu_memory_manager_client.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_manager_client.h"
-
-#include "content/common/gpu/gpu_memory_manager.h"
-
-namespace content {
-
-GpuMemoryManagerClientState::GpuMemoryManagerClientState(
- GpuMemoryManager* memory_manager,
- GpuMemoryManagerClient* client,
- GpuMemoryTrackingGroup* tracking_group,
- bool has_surface,
- bool visible)
- : memory_manager_(memory_manager),
- client_(client),
- tracking_group_(tracking_group),
- has_surface_(has_surface),
- visible_(visible),
- list_iterator_valid_(false),
- hibernated_(false) {
-}
-
-GpuMemoryManagerClientState::~GpuMemoryManagerClientState() {
- memory_manager_->OnDestroyClientState(this);
-}
-
-void GpuMemoryManagerClientState::SetVisible(bool visible) {
- memory_manager_->SetClientStateVisible(this, visible);
-}
-
-} // namespace content
diff --git a/content/common/gpu/gpu_memory_manager_client.h b/content/common/gpu/gpu_memory_manager_client.h
deleted file mode 100644
index ae06053..0000000
--- a/content/common/gpu/gpu_memory_manager_client.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_CLIENT_H_
-#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_CLIENT_H_
-
-#include <list>
-
-#include "base/basictypes.h"
-#include "content/common/content_export.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "gpu/command_buffer/service/memory_tracking.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace content {
-
-class GpuMemoryManager;
-class GpuMemoryTrackingGroup;
-
-// The interface that the GPU memory manager uses to manipulate a client (to
-// send it allocation information and query its properties).
-class CONTENT_EXPORT GpuMemoryManagerClient {
- public:
- virtual ~GpuMemoryManagerClient() {}
-
- // Returns surface size.
- virtual gfx::Size GetSurfaceSize() const = 0;
-
- // Returns the memory tracker for this stub.
- virtual gpu::gles2::MemoryTracker* GetMemoryTracker() const = 0;
-
- virtual void SuggestHaveFrontBuffer(bool suggest_have_frontbuffer) = 0;
-
- // Returns in bytes the total amount of GPU memory for the GPU which this
- // context is currently rendering on. Returns false if no extension exists
- // to get the exact amount of GPU memory.
- virtual bool GetTotalGpuMemory(uint64* bytes) = 0;
-};
-
-// The state associated with a GPU memory manager client. This acts as the
-// handle through which the client interacts with the GPU memory manager.
-class CONTENT_EXPORT GpuMemoryManagerClientState {
- public:
- ~GpuMemoryManagerClientState();
- void SetVisible(bool visible);
-
- private:
- friend class GpuMemoryManager;
-
- GpuMemoryManagerClientState(GpuMemoryManager* memory_manager,
- GpuMemoryManagerClient* client,
- GpuMemoryTrackingGroup* tracking_group,
- bool has_surface,
- bool visible);
-
- // The memory manager this client is hanging off of.
- GpuMemoryManager* memory_manager_;
-
- // The client to send allocations to.
- GpuMemoryManagerClient* client_;
-
- // The tracking group for this client.
- GpuMemoryTrackingGroup* tracking_group_;
-
- // Offscreen commandbuffers will not have a surface.
- const bool has_surface_;
-
- // Whether or not this client is visible.
- bool visible_;
-
- // If the client has a surface, then this is an iterator in the
- // clients_visible_mru_ if this client is visible and
- // clients_nonvisible_mru_ if this is non-visible. Otherwise this is an
- // iterator in clients_nonsurface_.
- std::list<GpuMemoryManagerClientState*>::iterator list_iterator_;
- bool list_iterator_valid_;
-
- // Set to disable allocating a frontbuffer or to disable allocations
- // for clients that don't have surfaces.
- bool hibernated_;
-};
-
-} // namespace content
-
-#endif // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_CLIENT_H_
diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc
deleted file mode 100644
index 3b6653b..0000000
--- a/content/common/gpu/gpu_memory_manager_unittest.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/common/gpu/gpu_memory_manager.h"
-
-#include "content/common/gpu/gpu_memory_manager_client.h"
-#include "content/common/gpu/gpu_memory_tracking.h"
-#include "gpu/command_buffer/common/gpu_memory_allocation.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gfx/geometry/size_conversions.h"
-
-using gpu::MemoryAllocation;
-
-class FakeMemoryTracker : public gpu::gles2::MemoryTracker {
- public:
- void TrackMemoryAllocatedChange(
- size_t /* old_size */,
- size_t /* new_size */) override {}
- bool EnsureGPUMemoryAvailable(size_t /* size_needed */) override {
- return true;
- }
- uint64_t ClientTracingId() const override { return 0; }
- int ClientId() const override { return 0; }
- uint64_t ShareGroupTracingGUID() const override { return 0; }
-
- private:
- ~FakeMemoryTracker() override {}
-};
-
-namespace content {
-
-// This class is used to collect all stub assignments during a
-// Manage() call.
-class ClientAssignmentCollector {
- public:
- struct ClientMemoryStat {
- MemoryAllocation allocation;
- };
- typedef base::hash_map<GpuMemoryManagerClient*, ClientMemoryStat>
- ClientMemoryStatMap;
-
- static const ClientMemoryStatMap& GetClientStatsForLastManage() {
- return client_memory_stats_for_last_manage_;
- }
- static void ClearAllStats() {
- client_memory_stats_for_last_manage_.clear();
- }
- static void AddClientStat(GpuMemoryManagerClient* client,
- const MemoryAllocation& allocation) {
- DCHECK(!client_memory_stats_for_last_manage_.count(client));
- client_memory_stats_for_last_manage_[client].allocation = allocation;
- }
-
- private:
- static ClientMemoryStatMap client_memory_stats_for_last_manage_;
-};
-
-ClientAssignmentCollector::ClientMemoryStatMap
- ClientAssignmentCollector::client_memory_stats_for_last_manage_;
-
-class FakeClient : public GpuMemoryManagerClient {
- public:
- GpuMemoryManager* memmgr_;
- bool suggest_have_frontbuffer_;
- MemoryAllocation allocation_;
- uint64 total_gpu_memory_;
- gfx::Size surface_size_;
- GpuMemoryManagerClient* share_group_;
- scoped_refptr<gpu::gles2::MemoryTracker> memory_tracker_;
- scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
- scoped_ptr<GpuMemoryManagerClientState> client_state_;
-
- // This will create a client with no surface
- FakeClient(GpuMemoryManager* memmgr, GpuMemoryManagerClient* share_group)
- : memmgr_(memmgr),
- suggest_have_frontbuffer_(false),
- total_gpu_memory_(0),
- share_group_(share_group),
- memory_tracker_(NULL) {
- if (!share_group_) {
- memory_tracker_ = new FakeMemoryTracker();
- tracking_group_.reset(
- memmgr_->CreateTrackingGroup(0, memory_tracker_.get()));
- }
- client_state_.reset(memmgr_->CreateClientState(this, false, true));
- }
-
- // This will create a client with a surface
- FakeClient(GpuMemoryManager* memmgr, int32 surface_id, bool visible)
- : memmgr_(memmgr),
- suggest_have_frontbuffer_(false),
- total_gpu_memory_(0),
- share_group_(NULL),
- memory_tracker_(NULL) {
- memory_tracker_ = new FakeMemoryTracker();
- tracking_group_.reset(
- memmgr_->CreateTrackingGroup(0, memory_tracker_.get()));
- client_state_.reset(
- memmgr_->CreateClientState(this, surface_id != 0, visible));
- }
-
- ~FakeClient() override {
- client_state_.reset();
- tracking_group_.reset();
- memory_tracker_ = NULL;
- }
-
- void SuggestHaveFrontBuffer(bool suggest_have_frontbuffer) override {
- suggest_have_frontbuffer_ = suggest_have_frontbuffer;
- }
-
- bool GetTotalGpuMemory(uint64* bytes) override {
- if (total_gpu_memory_) {
- *bytes = total_gpu_memory_;
- return true;
- }
- return false;
- }
- void SetTotalGpuMemory(uint64 bytes) { total_gpu_memory_ = bytes; }
-
- gpu::gles2::MemoryTracker* GetMemoryTracker() const override {
- if (share_group_)
- return share_group_->GetMemoryTracker();
- return memory_tracker_.get();
- }
-
- gfx::Size GetSurfaceSize() const override { return surface_size_; }
- void SetSurfaceSize(gfx::Size size) { surface_size_ = size; }
-
- void SetVisible(bool visible) {
- client_state_->SetVisible(visible);
- }
-
- uint64 BytesWhenVisible() const {
- return allocation_.bytes_limit_when_visible;
- }
-};
-
-class GpuMemoryManagerTest : public testing::Test {
- protected:
- static const uint64 kFrontbufferLimitForTest = 3;
-
- GpuMemoryManagerTest()
- : memmgr_(0, kFrontbufferLimitForTest) {
- memmgr_.TestingDisableScheduleManage();
- }
-
- void SetUp() override {}
-
- static int32 GenerateUniqueSurfaceId() {
- static int32 surface_id_ = 1;
- return surface_id_++;
- }
-
- bool IsAllocationForegroundForSurfaceYes(
- const MemoryAllocation& alloc) {
- return true;
- }
- bool IsAllocationBackgroundForSurfaceYes(
- const MemoryAllocation& alloc) {
- return true;
- }
- bool IsAllocationHibernatedForSurfaceYes(
- const MemoryAllocation& alloc) {
- return true;
- }
- bool IsAllocationForegroundForSurfaceNo(
- const MemoryAllocation& alloc) {
- return alloc.bytes_limit_when_visible != 0;
- }
- bool IsAllocationBackgroundForSurfaceNo(
- const MemoryAllocation& alloc) {
- return alloc.bytes_limit_when_visible != 0;
- }
- bool IsAllocationHibernatedForSurfaceNo(
- const MemoryAllocation& alloc) {
- return alloc.bytes_limit_when_visible == 0;
- }
-
- void Manage() {
- ClientAssignmentCollector::ClearAllStats();
- memmgr_.Manage();
- }
-
- GpuMemoryManager memmgr_;
-};
-
-} // namespace content
diff --git a/content/common/gpu/gpu_memory_uma_stats.h b/content/common/gpu/gpu_memory_uma_stats.h
index 467619d..f20376b 100644
--- a/content/common/gpu/gpu_memory_uma_stats.h
+++ b/content/common/gpu/gpu_memory_uma_stats.h
@@ -15,8 +15,6 @@ struct GPUMemoryUmaStats {
GPUMemoryUmaStats()
: bytes_allocated_current(0),
bytes_allocated_max(0),
- bytes_limit(0),
- client_count(0),
context_group_count(0) {
}
@@ -26,17 +24,8 @@ struct GPUMemoryUmaStats {
// The maximum number of bytes ever allocated at once.
size_t bytes_allocated_max;
- // The memory limit being imposed by the memory manager.
- size_t bytes_limit;
-
- // The number of managed memory clients.
- size_t client_count;
-
// The number of context groups.
size_t context_group_count;
-
- // The number of visible windows.
- uint32 window_count;
};
} // namespace content
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index 91803c2..ecb1979 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -274,7 +274,6 @@ IPC_STRUCT_TRAITS_END()
IPC_STRUCT_TRAITS_BEGIN(content::GPUMemoryUmaStats)
IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_current)
IPC_STRUCT_TRAITS_MEMBER(bytes_allocated_max)
- IPC_STRUCT_TRAITS_MEMBER(bytes_limit)
IPC_STRUCT_TRAITS_END()
IPC_STRUCT_TRAITS_BEGIN(gpu::MemoryAllocation)
diff --git a/content/content_common.gypi b/content/content_common.gypi
index 7a04d5e..b24ce41 100644
--- a/content/content_common.gypi
+++ b/content/content_common.gypi
@@ -326,8 +326,6 @@
'common/gpu/gpu_memory_buffer_factory_ozone_native_pixmap.h',
'common/gpu/gpu_memory_manager.cc',
'common/gpu/gpu_memory_manager.h',
- 'common/gpu/gpu_memory_manager_client.cc',
- 'common/gpu/gpu_memory_manager_client.h',
'common/gpu/gpu_memory_tracking.cc',
'common/gpu/gpu_memory_tracking.h',
'common/gpu/gpu_memory_uma_stats.h',
diff --git a/content/content_tests.gypi b/content/content_tests.gypi
index 929915b..64d2110 100644
--- a/content/content_tests.gypi
+++ b/content/content_tests.gypi
@@ -649,7 +649,6 @@
'common/gpu/gpu_channel_test_common.h',
'common/gpu/gpu_channel_unittest.cc',
'common/gpu/gpu_channel_manager_unittest.cc',
- 'common/gpu/gpu_memory_manager_unittest.cc',
'common/host_discardable_shared_memory_manager_unittest.cc',
'common/host_shared_bitmap_manager_unittest.cc',
'common/indexed_db/indexed_db_key_unittest.cc',
diff --git a/gpu/command_buffer/service/gl_context_virtual.cc b/gpu/command_buffer/service/gl_context_virtual.cc
index 563e601..77341a9 100644
--- a/gpu/command_buffer/service/gl_context_virtual.cc
+++ b/gpu/command_buffer/service/gl_context_virtual.cc
@@ -91,10 +91,6 @@ std::string GLContextVirtual::GetExtensions() {
return shared_context_->GetExtensions();
}
-bool GLContextVirtual::GetTotalGpuMemory(size_t* bytes) {
- return shared_context_->GetTotalGpuMemory(bytes);
-}
-
void GLContextVirtual::SetSafeToForceGpuSwitch() {
// TODO(ccameron): This will not work if two contexts that disagree
// about whether or not forced gpu switching may be done both share
diff --git a/gpu/command_buffer/service/gl_context_virtual.h b/gpu/command_buffer/service/gl_context_virtual.h
index 1403cf0..47710a4 100644
--- a/gpu/command_buffer/service/gl_context_virtual.h
+++ b/gpu/command_buffer/service/gl_context_virtual.h
@@ -43,7 +43,6 @@ class GPU_EXPORT GLContextVirtual : public gfx::GLContext {
scoped_refptr<gfx::GPUTimingClient> CreateGPUTimingClient() override;
void OnSetSwapInterval(int interval) override;
std::string GetExtensions() override;
- bool GetTotalGpuMemory(size_t* bytes) override;
void SetSafeToForceGpuSwitch() override;
bool WasAllocatedUsingRobustnessExtension() override;
void SetUnbindFboOnMakeCurrent() override;
diff --git a/ui/gl/gl_context.cc b/ui/gl/gl_context.cc
index 42dadd2..3e54ebb 100644
--- a/ui/gl/gl_context.cc
+++ b/ui/gl/gl_context.cc
@@ -66,12 +66,6 @@ GLContext::~GLContext() {
}
}
-bool GLContext::GetTotalGpuMemory(size_t* bytes) {
- DCHECK(bytes);
- *bytes = 0;
- return false;
-}
-
void GLContext::SetSafeToForceGpuSwitch() {
}
diff --git a/ui/gl/gl_context.h b/ui/gl/gl_context.h
index d391deb..f7b971d 100644
--- a/ui/gl/gl_context.h
+++ b/ui/gl/gl_context.h
@@ -74,11 +74,6 @@ class GL_EXPORT GLContext : public base::RefCounted<GLContext> {
// Returns space separated list of extensions. The context must be current.
virtual std::string GetExtensions();
- // Returns in bytes the total amount of GPU memory for the GPU which this
- // context is currently rendering on. Returns false if no extension exists
- // to get the exact amount of GPU memory.
- virtual bool GetTotalGpuMemory(size_t* bytes);
-
// Indicate that it is safe to force this context to switch GPUs, since
// transitioning can cause corruption and hangs (OS X only).
virtual void SetSafeToForceGpuSwitch();
diff --git a/ui/gl/gl_context_android.cc b/ui/gl/gl_context_android.cc
index ebf1dda..46f281b4 100644
--- a/ui/gl/gl_context_android.cc
+++ b/ui/gl/gl_context_android.cc
@@ -95,56 +95,4 @@ scoped_refptr<GLContext> GLContext::CreateGLContext(
return context;
}
-bool GLContextEGL::GetTotalGpuMemory(size_t* bytes) {
- DCHECK(bytes);
- *bytes = 0;
-
- // We can't query available GPU memory from the system on Android.
- // Physical memory is also mis-reported sometimes (eg. Nexus 10 reports
- // 1262MB when it actually has 2GB, while Razr M has 1GB but only reports
- // 128MB java heap size). First we estimate physical memory using both.
- size_t dalvik_mb = base::SysInfo::DalvikHeapSizeMB();
- size_t physical_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
- size_t physical_memory_mb = 0;
- if (dalvik_mb >= 256)
- physical_memory_mb = dalvik_mb * 4;
- else
- physical_memory_mb = std::max(dalvik_mb * 4,
- (physical_mb * 4) / 3);
-
- // Now we take a default of 1/8th of memory on high-memory devices,
- // and gradually scale that back for low-memory devices (to be nicer
- // to other apps so they don't get killed). Examples:
- // Nexus 4/10(2GB) 256MB (normally 128MB)
- // Droid Razr M(1GB) 114MB (normally 57MB)
- // Galaxy Nexus(1GB) 100MB (normally 50MB)
- // Xoom(1GB) 100MB (normally 50MB)
- // Nexus S(low-end) 8MB (normally 8MB)
- // Note that the compositor now uses only some of this memory for
- // pre-painting and uses the rest only for 'emergencies'.
- static size_t limit_bytes = 0;
- if (limit_bytes == 0) {
- // NOTE: Non-low-end devices use only 50% of these limits,
- // except during 'emergencies' where 100% can be used.
- if (!base::SysInfo::IsLowEndDevice()) {
- if (physical_memory_mb >= 1536)
- limit_bytes = physical_memory_mb / 8; // >192MB
- else if (physical_memory_mb >= 1152)
- limit_bytes = physical_memory_mb / 8; // >144MB
- else if (physical_memory_mb >= 768)
- limit_bytes = physical_memory_mb / 10; // >76MB
- else
- limit_bytes = physical_memory_mb / 12; // <64MB
- } else {
- // Low-end devices have 512MB or less memory by definition
- // so we hard code the limit rather than relying on the heuristics
- // above. Low-end devices use 4444 textures so we can use a lower limit.
- limit_bytes = 8;
- }
- limit_bytes = limit_bytes * 1024 * 1024;
- }
- *bytes = limit_bytes;
- return true;
-}
-
}
diff --git a/ui/gl/gl_context_cgl.cc b/ui/gl/gl_context_cgl.cc
index 6de014e..147e4b1 100644
--- a/ui/gl/gl_context_cgl.cc
+++ b/ui/gl/gl_context_cgl.cc
@@ -259,59 +259,6 @@ void GLContextCGL::OnSetSwapInterval(int interval) {
DCHECK(IsCurrent(nullptr));
}
-bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
- DCHECK(bytes);
- *bytes = 0;
-
- CGLContextObj context = reinterpret_cast<CGLContextObj>(context_);
- if (!context)
- return false;
-
- // Retrieve the current renderer ID
- GLint current_renderer_id = 0;
- if (CGLGetParameter(context,
- kCGLCPCurrentRendererID,
- &current_renderer_id) != kCGLNoError)
- return false;
-
- // Iterate through the list of all renderers
- GLuint display_mask = static_cast<GLuint>(-1);
- CGLRendererInfoObj renderer_info = nullptr;
- GLint num_renderers = 0;
- if (CGLQueryRendererInfo(display_mask,
- &renderer_info,
- &num_renderers) != kCGLNoError)
- return false;
-
- scoped_ptr<CGLRendererInfoObj,
- CGLRendererInfoObjDeleter> scoper(&renderer_info);
-
- for (GLint renderer_index = 0;
- renderer_index < num_renderers;
- ++renderer_index) {
- // Skip this if this renderer is not the current renderer.
- GLint renderer_id = 0;
- if (CGLDescribeRenderer(renderer_info,
- renderer_index,
- kCGLRPRendererID,
- &renderer_id) != kCGLNoError)
- continue;
- if (renderer_id != current_renderer_id)
- continue;
- // Retrieve the video memory for the renderer.
- GLint video_memory = 0;
- if (CGLDescribeRenderer(renderer_info,
- renderer_index,
- kCGLRPVideoMemory,
- &video_memory) != kCGLNoError)
- continue;
- *bytes = video_memory;
- return true;
- }
-
- return false;
-}
-
void GLContextCGL::SetSafeToForceGpuSwitch() {
safe_to_force_gpu_switch_ = true;
}
diff --git a/ui/gl/gl_context_cgl.h b/ui/gl/gl_context_cgl.h
index 2c01998..3cb850a 100644
--- a/ui/gl/gl_context_cgl.h
+++ b/ui/gl/gl_context_cgl.h
@@ -26,7 +26,6 @@ class GLContextCGL : public GLContextReal {
bool IsCurrent(GLSurface* surface) override;
void* GetHandle() override;
void OnSetSwapInterval(int interval) override;
- bool GetTotalGpuMemory(size_t* bytes) override;
void SetSafeToForceGpuSwitch() override;
bool ForceGpuSwitchIfNeeded() override;
diff --git a/ui/gl/gl_context_egl.cc b/ui/gl/gl_context_egl.cc
index ed83b85..8a9e4e4 100644
--- a/ui/gl/gl_context_egl.cc
+++ b/ui/gl/gl_context_egl.cc
@@ -216,12 +216,4 @@ GLContextEGL::~GLContextEGL() {
Destroy();
}
-#if !defined(OS_ANDROID)
-bool GLContextEGL::GetTotalGpuMemory(size_t* bytes) {
- DCHECK(bytes);
- *bytes = 0;
- return false;
-}
-#endif
-
} // namespace gfx
diff --git a/ui/gl/gl_context_egl.h b/ui/gl/gl_context_egl.h
index 0d46723..e86bcaa 100644
--- a/ui/gl/gl_context_egl.h
+++ b/ui/gl/gl_context_egl.h
@@ -33,7 +33,6 @@ class GLContextEGL : public GLContextReal {
void OnSetSwapInterval(int interval) override;
std::string GetExtensions() override;
bool WasAllocatedUsingRobustnessExtension() override;
- bool GetTotalGpuMemory(size_t* bytes) override;
void SetUnbindFboOnMakeCurrent() override;
protected:
diff --git a/ui/gl/gl_context_glx.cc b/ui/gl/gl_context_glx.cc
index a8711a8..d79d85c 100644
--- a/ui/gl/gl_context_glx.cc
+++ b/ui/gl/gl_context_glx.cc
@@ -193,19 +193,6 @@ std::string GLContextGLX::GetExtensions() {
return GLContext::GetExtensions();
}
-bool GLContextGLX::GetTotalGpuMemory(size_t* bytes) {
- DCHECK(bytes);
- *bytes = 0;
- if (HasExtension("GL_NVX_gpu_memory_info")) {
- GLint kbytes = 0;
- glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &kbytes);
- *bytes =
- base::saturated_cast<size_t>(1024u * static_cast<uint64_t>(kbytes));
- return true;
- }
- return false;
-}
-
bool GLContextGLX::WasAllocatedUsingRobustnessExtension() {
return GLSurfaceGLX::IsCreateContextRobustnessSupported();
}
diff --git a/ui/gl/gl_context_glx.h b/ui/gl/gl_context_glx.h
index ac93a5d..3881b44 100644
--- a/ui/gl/gl_context_glx.h
+++ b/ui/gl/gl_context_glx.h
@@ -32,7 +32,6 @@ class GL_EXPORT GLContextGLX : public GLContextReal {
void* GetHandle() override;
void OnSetSwapInterval(int interval) override;
std::string GetExtensions() override;
- bool GetTotalGpuMemory(size_t* bytes) override;
bool WasAllocatedUsingRobustnessExtension() override;
protected: