summaryrefslogtreecommitdiffstats
path: root/content
diff options
context:
space:
mode:
authorccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-11-01 23:18:17 +0000
committerccameron@chromium.org <ccameron@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-11-01 23:18:17 +0000
commit02882d2ea6004b5dfe820632516ce94e337605a8 (patch)
tree132fb1ecc338c1ae9bcba5309e02da7f72053bd1 /content
parente8789bb86b44198c629a8733714a0a1e26ac4bb7 (diff)
downloadchromium_src-02882d2ea6004b5dfe820632516ce94e337605a8.zip
chromium_src-02882d2ea6004b5dfe820632516ce94e337605a8.tar.gz
chromium_src-02882d2ea6004b5dfe820632516ce94e337605a8.tar.bz2
Adjust gpu memory manager limits for Android for high-res tablets.
We could return the limit from the Android egl context (since it knows its surface size), but it doesn't quite fit with the code in UpdateAvailableGpuMemory, and would be coupled to the other logic in there, so I decided to keep all the code in one place. This also converges behavior and tests. The behavior is the same now, minus calculating the total memory available which has a new test. Since only foreground tabs/windows are given memory this should work fine. However, we will need to be careful when enabling partial evictions of background tabs, as Android assumes background tabs are fully evicted. BUG=158465 Review URL: https://chromiumcodereview.appspot.com/11360047 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@165544 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content')
-rw-r--r--content/common/gpu/gpu_memory_manager.cc76
-rw-r--r--content/common/gpu/gpu_memory_manager.h26
-rw-r--r--content/common/gpu/gpu_memory_manager_unittest.cc117
3 files changed, 123 insertions, 96 deletions
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
index 0d3858b..877d29a 100644
--- a/content/common/gpu/gpu_memory_manager.cc
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -14,6 +14,7 @@
#include "base/message_loop.h"
#include "base/process_util.h"
#include "base/string_number_conversions.h"
+#include "base/sys_info.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/common/gpu/gpu_memory_tracking.h"
@@ -48,24 +49,6 @@ void AssignMemoryAllocations(
}
-size_t GpuMemoryManager::CalculateBonusMemoryAllocationBasedOnSize(
- gfx::Size size) const {
- const int kViewportMultiplier = 16;
- const unsigned int kComponentsPerPixel = 4; // GraphicsContext3D::RGBA
- const unsigned int kBytesPerComponent = 1; // sizeof(GC3Dubyte)
-
- if (size.IsEmpty())
- return 0;
-
- size_t limit = kViewportMultiplier * size.width() * size.height() *
- kComponentsPerPixel * kBytesPerComponent;
- if (limit < GetMinimumTabAllocation())
- limit = GetMinimumTabAllocation();
- else if (limit > GetAvailableGpuMemory())
- limit = GetAvailableGpuMemory();
- return limit - GetMinimumTabAllocation();
-}
-
GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
size_t max_surfaces_with_frontbuffer_soft_limit)
: client_(client),
@@ -94,6 +77,32 @@ GpuMemoryManager::~GpuMemoryManager() {
DCHECK(tracking_groups_.empty());
}
+size_t GpuMemoryManager::CalcAvailableFromViewportArea(int viewport_area) {
+ // We can't query available GPU memory from the system on Android, but
+ // 18X the viewport and 50% of the dalvik heap size give us a good
+ // estimate of available GPU memory on a wide range of devices.
+ const int kViewportMultiplier = 18;
+ const unsigned int kComponentsPerPixel = 4; // GraphicsContext3D::RGBA
+ const unsigned int kBytesPerComponent = 1; // sizeof(GC3Dubyte)
+ size_t viewport_limit = viewport_area * kViewportMultiplier *
+ kComponentsPerPixel *
+ kBytesPerComponent;
+#if !defined(OS_ANDROID)
+ return viewport_limit;
+#else
+ static size_t dalvik_limit = 0;
+ if (!dalvik_limit)
+ dalvik_limit = (base::SysInfo::DalvikHeapSizeMB() / 2) * 1024 * 1024;
+ return std::min(viewport_limit, dalvik_limit);
+#endif
+}
+
+size_t GpuMemoryManager::CalcAvailableFromGpuTotal(size_t total_gpu_memory) {
+ // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
+ // memory, whichever is less.
+ return std::min(3 * total_gpu_memory / 4, total_gpu_memory - 64*1024*1024);
+}
+
void GpuMemoryManager::UpdateAvailableGpuMemory(
std::vector<GpuCommandBufferStubBase*>& stubs) {
// If the amount of video memory to use was specified at the command
@@ -101,6 +110,19 @@ void GpuMemoryManager::UpdateAvailableGpuMemory(
if (bytes_available_gpu_memory_overridden_)
return;
+#if defined(OS_ANDROID)
+ // On Android we use the surface size, so this finds the largest visible
+ // surface size instead of lowest gpu's limit.
+ int max_surface_area = 0;
+ for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+ it != stubs.end(); ++it) {
+ GpuCommandBufferStubBase* stub = *it;
+ gfx::Size surface_size = stub->GetSurfaceSize();
+ max_surface_area = std::max(max_surface_area, surface_size.width() *
+ surface_size.height());
+ }
+ bytes_available_gpu_memory_ = CalcAvailableFromViewportArea(max_surface_area);
+#else
// We do not have a reliable concept of multiple GPUs existing in
// a system, so just be safe and go with the minimum encountered.
size_t bytes_min = 0;
@@ -116,18 +138,17 @@ void GpuMemoryManager::UpdateAvailableGpuMemory(
if (!bytes_min)
return;
- // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
- // memory, whichever is less.
- bytes_available_gpu_memory_ = std::min(3 * bytes_min / 4,
- bytes_min - 64*1024*1024);
+ bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min);
+
+#endif
// And never go below the default allocation
bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_,
GetDefaultAvailableGpuMemory());
- // And never go above 1GB
+ // And never go above the maximum.
bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
- static_cast<size_t>(1024*1024*1024));
+ GetMaximumTotalGpuMemory());
}
bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
@@ -328,7 +349,6 @@ void GpuMemoryManager::Manage() {
UpdateAvailableGpuMemory(stubs_with_surface_foreground);
size_t bonus_allocation = 0;
-#if !defined(OS_ANDROID)
// Calculate bonus allocation by splitting remainder of global limit equally
// after giving out the minimum to those that need it.
size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
@@ -339,12 +359,6 @@ void GpuMemoryManager::Manage() {
!stubs_with_surface_foreground.empty())
bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) /
stubs_with_surface_foreground.size();
-#else
- // On android, calculate bonus allocation based on surface size.
- if (!stubs_with_surface_foreground.empty())
- bonus_allocation = CalculateBonusMemoryAllocationBasedOnSize(
- stubs_with_surface_foreground[0]->GetSurfaceSize());
-#endif
size_t stubs_with_surface_foreground_allocation = GetMinimumTabAllocation() +
bonus_allocation;
diff --git a/content/common/gpu/gpu_memory_manager.h b/content/common/gpu/gpu_memory_manager.h
index 9690f4a..1aadfdd 100644
--- a/content/common/gpu/gpu_memory_manager.h
+++ b/content/common/gpu/gpu_memory_manager.h
@@ -81,8 +81,6 @@ class CONTENT_EXPORT GpuMemoryManager :
// The context groups' tracking structures
std::set<GpuMemoryTrackingGroup*> tracking_groups_;
- size_t CalculateBonusMemoryAllocationBasedOnSize(gfx::Size size) const;
-
// Update the amount of GPU memory we think we have in the system, based
// on what the stubs' contexts report.
void UpdateAvailableGpuMemory(std::vector<GpuCommandBufferStubBase*>& stubs);
@@ -96,7 +94,7 @@ class CONTENT_EXPORT GpuMemoryManager :
// if we can't query the driver for an exact value.
size_t GetDefaultAvailableGpuMemory() const {
#if defined(OS_ANDROID)
- return 64 * 1024 * 1024;
+ return 32 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
return 1024 * 1024 * 1024;
#else
@@ -104,11 +102,23 @@ class CONTENT_EXPORT GpuMemoryManager :
#endif
}
- // The maximum amount of memory that a tab may be assigned
-size_t GetMaximumTabAllocation() const {
+ // Get a reasonable memory limit from a viewport's surface area.
+ static size_t CalcAvailableFromViewportArea(int viewport_area);
+ static size_t CalcAvailableFromGpuTotal(size_t total_gpu_memory);
+
+ // Maximum cap on total GPU memory, no matter how much
+ // the GPU reports to have.
+ static size_t GetMaximumTotalGpuMemory() {
#if defined(OS_ANDROID)
- return 128 * 1024 * 1024;
-#elif defined(OS_CHROMEOS)
+ return 256 * 1024 * 1024;
+#else
+ return 1024 * 1024 * 1024;
+#endif
+ }
+
+ // The maximum amount of memory that a tab may be assigned
+ size_t GetMaximumTabAllocation() const {
+#if defined(OS_ANDROID) || defined(OS_CHROMEOS)
return bytes_available_gpu_memory_;
#else
// This is to avoid allowing a single page on to use a full 256MB of memory
@@ -119,7 +129,7 @@ size_t GetMaximumTabAllocation() const {
}
// The minimum non-zero amount of memory that a tab may be assigned
- size_t GetMinimumTabAllocation() const {
+ static size_t GetMinimumTabAllocation() {
#if defined(OS_ANDROID)
return 32 * 1024 * 1024;
#elif defined(OS_CHROMEOS)
diff --git a/content/common/gpu/gpu_memory_manager_unittest.cc b/content/common/gpu/gpu_memory_manager_unittest.cc
index 3be111b..8ac5fcb 100644
--- a/content/common/gpu/gpu_memory_manager_unittest.cc
+++ b/content/common/gpu/gpu_memory_manager_unittest.cc
@@ -44,17 +44,20 @@ class FakeCommandBufferStub : public GpuCommandBufferStubBase {
public:
MemoryManagerState memory_manager_state_;
GpuMemoryAllocation allocation_;
- gfx::Size size_;
+ gfx::Size surface_size_;
+ size_t total_gpu_memory_;
FakeCommandBufferStub()
- : memory_manager_state_(0, false, base::TimeTicks()) {
+ : memory_manager_state_(0, false, base::TimeTicks())
+ , total_gpu_memory_(0) {
memory_manager_state_.client_has_memory_allocation_changed_callback = true;
}
FakeCommandBufferStub(int32 surface_id,
bool visible,
base::TimeTicks last_used_time)
- : memory_manager_state_(surface_id != 0, visible, last_used_time) {
+ : memory_manager_state_(surface_id != 0, visible, last_used_time)
+ , total_gpu_memory_(0) {
memory_manager_state_.client_has_memory_allocation_changed_callback = true;
}
@@ -63,7 +66,7 @@ class FakeCommandBufferStub : public GpuCommandBufferStubBase {
}
virtual gfx::Size GetSurfaceSize() const {
- return size_;
+ return surface_size_;
}
virtual bool IsInSameContextShareGroup(
const GpuCommandBufferStubBase& stub) const {
@@ -74,8 +77,15 @@ class FakeCommandBufferStub : public GpuCommandBufferStubBase {
StubAssignmentCollector::AddStubStat(this, alloc);
}
virtual bool GetTotalGpuMemory(size_t* bytes) {
+ if (total_gpu_memory_) {
+ *bytes = total_gpu_memory_;
+ return true;
+ }
return false;
}
+
+ void SetTotalGpuMemory(size_t bytes) { total_gpu_memory_ = bytes; }
+ void SetSurfaceSize(gfx::Size size) { surface_size_ = size; }
};
class FakeCommandBufferStubWithoutSurface : public GpuCommandBufferStubBase {
@@ -190,6 +200,20 @@ class GpuMemoryManagerTest : public testing::Test {
memory_manager_.Manage();
}
+ size_t CalcAvailableFromGpuTotal(size_t bytes) {
+ return GpuMemoryManager::CalcAvailableFromGpuTotal(bytes);
+ }
+
+ size_t CalcAvailableFromViewportArea(int viewport_area) {
+ return GpuMemoryManager::CalcAvailableFromViewportArea(viewport_area);
+ }
+
+ size_t CalcAvailableClamped(size_t bytes) {
+ bytes = std::max(bytes, memory_manager_.GetDefaultAvailableGpuMemory());
+ bytes = std::min(bytes, memory_manager_.GetMaximumTotalGpuMemory());
+ return bytes;
+ }
+
size_t GetAvailableGpuMemory() {
return memory_manager_.GetAvailableGpuMemory();
}
@@ -521,7 +545,6 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub4.allocation_));
}
-#if !defined(OS_ANDROID)
// Test GpuMemoryAllocation memory allocation bonuses:
// When the number of visible tabs is small, each tab should get a
// gpu_resource_size_in_bytes allocation value that is greater than
@@ -559,49 +582,38 @@ TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
GetMinimumTabAllocation());
}
}
-#else
-// Test GpuMemoryAllocation memory allocation bonuses:
-// When the size of tab contents is small, bonus allocation should be 0.
-// As the size of tab contents increases, bonus allocation should increase
-// until finally reaching the maximum allocation limit.
-TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocationAndroid) {
- FakeCommandBufferStub stub(GenerateUniqueSurfaceId(), true, older_);
- client_.stubs_.push_back(&stub);
-
- stub.size_ = gfx::Size(1,1);
- Manage();
- EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub.allocation_));
- EXPECT_EQ(stub.allocation_.renderer_allocation.bytes_limit_when_visible,
- GetMinimumTabAllocation());
-
- // Keep increasing size, making sure allocation is always increasing
- // Until it finally reaches the maximum.
- while (stub.allocation_.renderer_allocation.bytes_limit_when_visible <
- GetAvailableGpuMemory()) {
- size_t previous_allocation =
- stub.allocation_.renderer_allocation.bytes_limit_when_visible;
-
- stub.size_ = gfx::ToFlooredSize(stub.size_.Scale(1, 2));
-
- Manage();
- EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub.allocation_));
- EXPECT_GE(stub.allocation_.renderer_allocation.bytes_limit_when_visible,
- GetMinimumTabAllocation());
- EXPECT_LE(stub.allocation_.renderer_allocation.bytes_limit_when_visible,
- GetAvailableGpuMemory());
- EXPECT_GE(stub.allocation_.renderer_allocation.bytes_limit_when_visible,
- previous_allocation);
- }
-
- // One final size increase to confirm it stays capped at maximum.
- stub.size_ = gfx::ToFlooredSize(stub.size_.Scale(1, 2));
+// Test GpuMemoryManager::UpdateAvailableGpuMemory functionality
+TEST_F(GpuMemoryManagerTest, TestUpdateAvailableGpuMemory) {
+ FakeCommandBufferStub stub1(GenerateUniqueSurfaceId(), true, older_),
+ stub2(GenerateUniqueSurfaceId(), false, older_),
+ stub3(GenerateUniqueSurfaceId(), true, older_),
+ stub4(GenerateUniqueSurfaceId(), false, older_);
+ client_.stubs_.push_back(&stub1);
+ client_.stubs_.push_back(&stub2);
+ client_.stubs_.push_back(&stub3);
+ client_.stubs_.push_back(&stub4);
+#if defined(OS_ANDROID)
+ // We use the largest visible surface size to calculate the limit
+ stub1.SetSurfaceSize(gfx::Size(1024, 512)); // Surface size
+ stub2.SetSurfaceSize(gfx::Size(2048, 512)); // Larger but not visible.
+ stub3.SetSurfaceSize(gfx::Size(512, 512)); // Visible but smaller.
+ stub4.SetSurfaceSize(gfx::Size(512, 512)); // Not visible and smaller.
Manage();
- EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub.allocation_));
- EXPECT_EQ(stub.allocation_.renderer_allocation.bytes_limit_when_visible,
- GetAvailableGpuMemory());
-}
+ size_t bytes_expected = CalcAvailableFromViewportArea(1024*512);
+#else
+ // We take the lowest GPU's total memory as the limit
+ size_t expected = 400 * 1024 * 1024;
+ stub1.SetTotalGpuMemory(expected); // GPU Memory
+ stub2.SetTotalGpuMemory(expected - 1024 * 1024); // Smaller but not visible.
+ stub3.SetTotalGpuMemory(expected + 1024 * 1024); // Visible but larger.
+ stub4.SetTotalGpuMemory(expected + 1024 * 1024); // Not visible and larger.
+ Manage();
+ size_t bytes_expected = CalcAvailableFromGpuTotal(expected);
#endif
+ EXPECT_EQ(GetAvailableGpuMemory(), CalcAvailableClamped(bytes_expected));
+}
+
// Test GpuMemoryAllocation comparison operators: Iterate over all possible
// combinations of gpu_resource_size_in_bytes, suggest_have_backbuffer, and
@@ -651,12 +663,6 @@ TEST_F(GpuMemoryManagerTest, GpuMemoryAllocationCompareTests) {
// Creates various surface/non-surface stubs and switches stub visibility and
// tests to see that stats data structure values are correct.
TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
-#if !defined(OS_ANDROID)
- const bool compositors_get_bonus_allocation = true;
-#else
- const bool compositors_get_bonus_allocation = false;
-#endif
-
StubAssignmentCollector::StubMemoryStatMap stats;
Manage();
@@ -688,8 +694,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_EQ(stats.size(), 2ul);
EXPECT_GT(stub1allocation2, 0ul);
EXPECT_GT(stub2allocation2, 0ul);
- if (compositors_get_bonus_allocation &&
- stub1allocation2 != GetMaximumTabAllocation())
+ if (stub1allocation2 != GetMaximumTabAllocation())
EXPECT_LT(stub1allocation2, stub1allocation1);
FakeCommandBufferStub stub3(GenerateUniqueSurfaceId(), true, older_);
@@ -707,8 +712,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_GT(stub1allocation3, 0ul);
EXPECT_GT(stub2allocation3, 0ul);
EXPECT_GT(stub3allocation3, 0ul);
- if (compositors_get_bonus_allocation &&
- stub1allocation3 != GetMaximumTabAllocation())
+ if (stub1allocation3 != GetMaximumTabAllocation())
EXPECT_LT(stub1allocation3, stub1allocation2);
stub1.memory_manager_state_.visible = false;
@@ -725,8 +729,7 @@ TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
EXPECT_GT(stub1allocation4, 0ul);
EXPECT_GE(stub2allocation4, 0ul);
EXPECT_GT(stub3allocation4, 0ul);
- if (compositors_get_bonus_allocation &&
- stub3allocation3 != GetMaximumTabAllocation())
+ if (stub3allocation3 != GetMaximumTabAllocation())
EXPECT_GT(stub3allocation4, stub3allocation3);
}