path: root/content/common/gpu/gpu_memory_manager.h
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_
#define CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_

#include <list>
#include <map>

#include "base/basictypes.h"
#include "base/cancelable_callback.h"
#include "base/gtest_prod_util.h"
#include "base/hash_tables.h"
#include "base/memory/weak_ptr.h"
#include "content/common/content_export.h"
#include "content/common/gpu/gpu_memory_allocation.h"
#include "content/public/common/gpu_memory_stats.h"
#include "gpu/command_buffer/service/memory_tracking.h"

namespace content {

class GpuChannelManager;
class GpuMemoryManagerClient;
class GpuMemoryManagerClientState;
class GpuMemoryTrackingGroup;

class CONTENT_EXPORT GpuMemoryManager :
    public base::SupportsWeakPtr<GpuMemoryManager> {
 public:
  enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
  enum ScheduleManageTime {
    // Add a call to Manage to the thread's message loop immediately.
    kScheduleManageNow,
    // Add a Manage call to the thread's message loop for execution 1/60th of
    // a second from now.
    kScheduleManageLater,
  };

  GpuMemoryManager(GpuChannelManager* channel_manager,
                   size_t max_surfaces_with_frontbuffer_soft_limit);
  ~GpuMemoryManager();

  // Schedule a Manage() call. If schedule_manage_time is kScheduleManageNow,
  // we PostTask without delay. Otherwise we PostDelayedTask using a
  // CancelableClosure and allow multiple delayed calls to "queue" up. This
  // way, we do not spam clients in certain lower-priority situations. An
  // immediately scheduled Manage() cancels any queued delayed one.
  void ScheduleManage(ScheduleManageTime schedule_manage_time);
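
  // The scheduling policy described above corresponds roughly to the
  // following sketch (illustrative only; the exact control flow lives in
  // gpu_memory_manager.cc and may differ):
  //
  //   void GpuMemoryManager::ScheduleManage(ScheduleManageTime when) {
  //     if (disable_schedule_manage_ || manage_immediate_scheduled_)
  //       return;
  //     if (when == kScheduleManageNow) {
  //       MessageLoop::current()->PostTask(
  //           FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
  //       manage_immediate_scheduled_ = true;
  //       // An immediate Manage() supersedes any queued delayed one.
  //       delayed_manage_callback_.Cancel();
  //     } else if (delayed_manage_callback_.IsCancelled()) {
  //       // Coalesce repeated kScheduleManageLater requests into one task.
  //       delayed_manage_callback_.Reset(
  //           base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
  //       MessageLoop::current()->PostDelayedTask(
  //           FROM_HERE, delayed_manage_callback_.callback(),
  //           base::TimeDelta::FromMilliseconds(17));  // ~1/60 second.
  //     }
  //   }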

  // Retrieve GPU resource consumption statistics for the task manager.
  void GetVideoMemoryUsageStats(
      content::GPUVideoMemoryUsageStats* video_memory_usage_stats) const;
  void SetWindowCount(uint32 count);

  GpuMemoryManagerClientState* CreateClientState(
      GpuMemoryManagerClient* client, bool has_surface, bool visible);

  GpuMemoryTrackingGroup* CreateTrackingGroup(
      base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
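
  // Typical registration, sketched from how a command buffer stub would use
  // this interface (illustrative; the names and ownership pattern here are
  // assumptions, not code in this file):
  //
  //   // On stub creation:
  //   client_state_.reset(memory_manager->CreateClientState(
  //       this, surface_id != 0 /* has_surface */, true /* visible */));
  //   tracking_group_.reset(memory_manager->CreateTrackingGroup(
  //       renderer_pid, context_group->memory_tracker()));
  //
  //   // On stub destruction, deleting the returned objects unregisters them.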

 private:
  friend class GpuMemoryManagerTest;
  friend class GpuMemoryTrackingGroup;
  friend class GpuMemoryManagerClientState;

  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           ComparatorTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageBasicFunctionality);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingVisibility);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageManyNotVisibleStubs);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingLastUsedTime);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManageChangingImportanceShareGroup);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestForegroundStubsGetBonusAllocation);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestUpdateAvailableGpuMemory);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           GpuMemoryAllocationCompareTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           StubMemoryStatsForLastManageTests);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestManagedUsageTracking);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestBackgroundCutoff);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestBackgroundMru);
  FRIEND_TEST_ALL_PREFIXES(GpuMemoryManagerTest,
                           TestUnmanagedTracking);

  typedef std::map<gpu::gles2::MemoryTracker*, GpuMemoryTrackingGroup*>
      TrackingGroupMap;

  typedef std::list<GpuMemoryManagerClientState*> ClientStateList;

  void Manage();
  void SetClientsHibernatedState() const;
  size_t GetVisibleClientAllocation() const;
  size_t GetCurrentNonvisibleAvailableGpuMemory() const;
  void AssignSurfacesAllocationsNonuniform();
  void AssignSurfacesAllocationsUniform();
  void AssignNonSurfacesAllocations();

  // Math helper function to compute the maximum value of cap such that
  // sum_i min(bytes[i], cap) <= bytes_sum_limit
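  // For example (illustrative values, not taken from any test): with
  // bytes = {10, 30, 60} and bytes_sum_limit = 80, the answer is 40, since
  // min(10, 40) + min(30, 40) + min(60, 40) = 10 + 30 + 40 = 80, and any
  // larger cap pushes the sum past the limit.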
  static size_t ComputeCap(std::vector<size_t> bytes, size_t bytes_sum_limit);

  // Compute the allocation for clients when visible and not visible.
  void ComputeVisibleSurfacesAllocationsNonuniform();
  void ComputeNonvisibleSurfacesAllocationsNonuniform();

  // Compute the budget for a client. Allow at most bytes_above_required_cap
  // bytes above client_state's required level. Allow at most
  // bytes_above_minimum_cap bytes above client_state's minimum level. Allow
  // at most bytes_overall_cap bytes total.
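  // Taken together (an illustrative reading of this comment, not of the
  // implementation), the visible budget is bounded by roughly
  //   min(required + bytes_above_required_cap,
  //       minimum + bytes_above_minimum_cap,
  //       bytes_overall_cap).
  // For example, with required = 20 MB, minimum = 10 MB, and caps of 16 MB,
  // 64 MB, and 48 MB respectively, the budget cannot exceed
  // min(36, 74, 48) = 36 MB.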
  size_t ComputeClientAllocationWhenVisible(
      GpuMemoryManagerClientState* client_state,
      size_t bytes_above_required_cap,
      size_t bytes_above_minimum_cap,
      size_t bytes_overall_cap);
  size_t ComputeClientAllocationWhenNonvisible(
      GpuMemoryManagerClientState* client_state);

  // Update the amount of GPU memory we think we have in the system, based
  // on what the stubs' contexts report.
  void UpdateAvailableGpuMemory();
  void UpdateUnmanagedMemoryLimits();
  void UpdateNonvisibleAvailableGpuMemory();

  // The amount of video memory which is available for allocation.
  size_t GetAvailableGpuMemory() const;

  // Minimum value of available GPU memory, no matter how little the GPU
  // reports. This is the default value.
  size_t GetDefaultAvailableGpuMemory() const;

  // Maximum cap on total GPU memory, no matter how much the GPU reports.
  size_t GetMaximumTotalGpuMemory() const;

  // The maximum and minimum amount of memory that a tab may be assigned.
  size_t GetMaximumClientAllocation() const;
  size_t GetMinimumClientAllocation() const;

  // Get a reasonable memory limit from a viewport's surface area.
  static size_t CalcAvailableFromViewportArea(int viewport_area);
  static size_t CalcAvailableFromGpuTotal(size_t total_gpu_memory);

  // Send memory usage stats to the browser process.
  void SendUmaStatsToBrowser();

  // Get the current number of bytes allocated.
  size_t GetCurrentUsage() const {
    return bytes_allocated_managed_current_ +
        bytes_allocated_unmanaged_current_;
  }

  // GpuMemoryTrackingGroup interface
  void TrackMemoryAllocatedChange(
      GpuMemoryTrackingGroup* tracking_group,
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool tracking_pool);
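  // For example (illustrative, not a quote from the callers): when a context
  // in a tracking group allocates a 1 MB texture, its memory tracker reports
  //   TrackMemoryAllocatedChange(group, old_size, old_size + 1024 * 1024,
  //                              pool);
  // which updates the managed/unmanaged totals below and can reschedule
  // Manage() if the unmanaged usage crosses its high/low thresholds.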
  void OnDestroyTrackingGroup(GpuMemoryTrackingGroup* tracking_group);
  bool EnsureGPUMemoryAvailable(size_t size_needed);

  // GpuMemoryManagerClientState interface
  void SetClientStateVisible(
      GpuMemoryManagerClientState* client_state, bool visible);
  void SetClientStateManagedMemoryStats(
      GpuMemoryManagerClientState* client_state,
      const GpuManagedMemoryStats& stats);
  void OnDestroyClientState(GpuMemoryManagerClientState* client);

  // Add or remove a client from its clients list (visible, nonvisible, or
  // nonsurface). When adding the client, add it to the front of the list.
  void AddClientToList(GpuMemoryManagerClientState* client_state);
  void RemoveClientFromList(GpuMemoryManagerClientState* client_state);
  ClientStateList* GetClientList(GpuMemoryManagerClientState* client_state);

  // Interfaces for testing
  void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
  void TestingSetAvailableGpuMemory(size_t bytes) {
    bytes_available_gpu_memory_ = bytes;
    bytes_available_gpu_memory_overridden_ = true;
  }

  void TestingSetMinimumClientAllocation(size_t bytes) {
    bytes_minimum_per_client_ = bytes;
    bytes_minimum_per_client_overridden_ = true;
  }

  void TestingSetUnmanagedLimitStep(size_t bytes) {
    bytes_unmanaged_limit_step_ = bytes;
  }

  void TestingSetNonvisibleAvailableGpuMemory(size_t bytes) {
    bytes_nonvisible_available_gpu_memory_ = bytes;
  }

  GpuChannelManager* channel_manager_;

  // The new memory policy does not uniformly assign memory to tabs, but
  // scales the assignments to the tabs' needs.
  bool use_nonuniform_memory_policy_;

  // A list of all visible and nonvisible clients, in most-recently-used
  // order (most recently used is first).
  ClientStateList clients_visible_mru_;
  ClientStateList clients_nonvisible_mru_;

  // A list of all clients that don't have a surface.
  ClientStateList clients_nonsurface_;

  // All context groups' tracking structures
  TrackingGroupMap tracking_groups_;

  base::CancelableClosure delayed_manage_callback_;
  bool manage_immediate_scheduled_;

  size_t max_surfaces_with_frontbuffer_soft_limit_;

  // The maximum amount of memory that may be allocated for GPU resources
  size_t bytes_available_gpu_memory_;
  bool bytes_available_gpu_memory_overridden_;

  // The minimum allocation that may be given to a single renderer.
  size_t bytes_minimum_per_client_;
  bool bytes_minimum_per_client_overridden_;

  // The maximum amount of memory that can be allocated for GPU resources
  // in nonvisible renderers.
  size_t bytes_nonvisible_available_gpu_memory_;

  // The current total memory usage, and historical maximum memory usage
  size_t bytes_allocated_managed_current_;
  size_t bytes_allocated_managed_visible_;
  size_t bytes_allocated_managed_nonvisible_;
  size_t bytes_allocated_unmanaged_current_;
  size_t bytes_allocated_historical_max_;

  // If bytes_allocated_unmanaged_current_ leaves the interval [low_, high_),
  // then ScheduleManage to take the change into account.
  size_t bytes_allocated_unmanaged_high_;
  size_t bytes_allocated_unmanaged_low_;

  // Update bytes_allocated_unmanaged_low/high_ in intervals of step_.
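  // For example (illustrative values only): with a 4 MB step and unmanaged
  // usage currently at 10 MB, the low/high thresholds would bracket the usage
  // at roughly 8 MB and 12 MB, so small fluctuations do not retrigger
  // Manage(); only a change that leaves that window schedules another pass.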
  size_t bytes_unmanaged_limit_step_;

  // The number of browser windows that exist. If we ever receive a
  // GpuMsg_SetVideoMemoryWindowCount, then we use this to compute memory
  // allocations, instead of doing more complicated stub-based calculations.
  bool window_count_has_been_received_;
  uint32 window_count_;

  // Used to disable automatic changes to Manage() in testing.
  bool disable_schedule_manage_;

  DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_GPU_MEMORY_MANAGER_H_