summaryrefslogtreecommitdiffstats
path: root/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'gpu')
-rw-r--r--gpu/GLES2/gl2chromium_autogen.h2
-rwxr-xr-xgpu/command_buffer/build_gles2_cmd_buffer.py22
-rw-r--r--gpu/command_buffer/client/buffer_tracker.cc22
-rw-r--r--gpu/command_buffer/client/buffer_tracker.h26
-rw-r--r--gpu/command_buffer/client/buffer_tracker_unittest.cc24
-rw-r--r--gpu/command_buffer/client/cmd_buffer_helper.h9
-rw-r--r--gpu/command_buffer/client/fenced_allocator.cc11
-rw-r--r--gpu/command_buffer/client/fenced_allocator.h8
-rw-r--r--gpu/command_buffer/client/fenced_allocator_test.cc71
-rw-r--r--gpu/command_buffer/client/gles2_c_lib_autogen.h6
-rw-r--r--gpu/command_buffer/client/gles2_cmd_helper_autogen.h28
-rw-r--r--gpu/command_buffer/client/gles2_implementation.cc191
-rw-r--r--gpu/command_buffer/client/gles2_implementation.h44
-rw-r--r--gpu/command_buffer/client/gles2_implementation_autogen.h2
-rw-r--r--gpu/command_buffer/client/gles2_interface_autogen.h1
-rw-r--r--gpu/command_buffer/client/gles2_interface_stub_autogen.h1
-rw-r--r--gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h1
-rw-r--r--gpu/command_buffer/client/gles2_trace_implementation_autogen.h1
-rw-r--r--gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h6
-rw-r--r--gpu/command_buffer/client/mapped_memory.cc9
-rw-r--r--gpu/command_buffer/client/mapped_memory.h15
-rw-r--r--gpu/command_buffer/client/mapped_memory_unittest.cc75
-rw-r--r--gpu/command_buffer/client/query_tracker_unittest.cc9
-rw-r--r--gpu/command_buffer/cmd_buffer_functions.txt1
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format.h19
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_autogen.h89
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_test.cc67
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_test_autogen.h26
-rw-r--r--gpu/command_buffer/common/gles2_cmd_ids_autogen.h17
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager.h3
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc10
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_egl.h1
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc17
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_idle.h7
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_mock.h1
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc10
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h1
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc3
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_stub.h1
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc3
-rw-r--r--gpu/command_buffer/service/async_pixel_transfer_manager_sync.h1
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder.cc94
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc56
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h2
44 files changed, 927 insertions, 86 deletions
diff --git a/gpu/GLES2/gl2chromium_autogen.h b/gpu/GLES2/gl2chromium_autogen.h
index 19f89f3..bbef99f 100644
--- a/gpu/GLES2/gl2chromium_autogen.h
+++ b/gpu/GLES2/gl2chromium_autogen.h
@@ -225,6 +225,8 @@
#define glAsyncTexSubImage2DCHROMIUM GLES2_GET_FUN(AsyncTexSubImage2DCHROMIUM)
#define glAsyncTexImage2DCHROMIUM GLES2_GET_FUN(AsyncTexImage2DCHROMIUM)
#define glWaitAsyncTexImage2DCHROMIUM GLES2_GET_FUN(WaitAsyncTexImage2DCHROMIUM)
+#define glWaitAllAsyncTexImage2DCHROMIUM \
+ GLES2_GET_FUN(WaitAllAsyncTexImage2DCHROMIUM)
#define glDiscardFramebufferEXT GLES2_GET_FUN(DiscardFramebufferEXT)
#define glLoseContextCHROMIUM GLES2_GET_FUN(LoseContextCHROMIUM)
#define glInsertSyncPointCHROMIUM GLES2_GET_FUN(InsertSyncPointCHROMIUM)
diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py
index da0aa28..e635ae5 100755
--- a/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -2462,6 +2462,14 @@ _FUNCTION_INFO = {
'type': 'Manual',
'immediate': False,
'client_test': False,
+ 'cmd_args': 'GLenumTextureTarget target, GLint level, '
+ 'GLintTextureInternalFormat internalformat, '
+ 'GLsizei width, GLsizei height, '
+ 'GLintTextureBorder border, '
+ 'GLenumTextureFormat format, GLenumPixelType type, '
+ 'const void* pixels, '
+ 'uint32 async_upload_token, '
+ 'void* sync_data',
'extension': True,
'chromium': True,
},
@@ -2469,6 +2477,13 @@ _FUNCTION_INFO = {
'type': 'Manual',
'immediate': False,
'client_test': False,
+ 'cmd_args': 'GLenumTextureTarget target, GLint level, '
+ 'GLint xoffset, GLint yoffset, '
+ 'GLsizei width, GLsizei height, '
+ 'GLenumTextureFormat format, GLenumPixelType type, '
+ 'const void* data, '
+ 'uint32 async_upload_token, '
+ 'void* sync_data',
'extension': True,
'chromium': True,
},
@@ -2479,6 +2494,13 @@ _FUNCTION_INFO = {
'extension': True,
'chromium': True,
},
+ 'WaitAllAsyncTexImage2DCHROMIUM': {
+ 'type': 'Manual',
+ 'immediate': False,
+ 'client_test': False,
+ 'extension': True,
+ 'chromium': True,
+ },
'DiscardFramebufferEXT': {
'type': 'PUTn',
'count': 1,
diff --git a/gpu/command_buffer/client/buffer_tracker.cc b/gpu/command_buffer/client/buffer_tracker.cc
index 18cedb9..5887e52 100644
--- a/gpu/command_buffer/client/buffer_tracker.cc
+++ b/gpu/command_buffer/client/buffer_tracker.cc
@@ -60,8 +60,30 @@ void BufferTracker::FreePendingToken(Buffer* buffer, int32 token) {
buffer->shm_id_ = 0;
buffer->shm_offset_ = 0;
buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
}
+void BufferTracker::Unmanage(Buffer* buffer) {
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
+
+void BufferTracker::Free(Buffer* buffer) {
+ if (buffer->address_)
+ mapped_memory_->Free(buffer->address_);
+
+ buffer->size_ = 0;
+ buffer->shm_id_ = 0;
+ buffer->shm_offset_ = 0;
+ buffer->address_ = NULL;
+ buffer->last_usage_token_ = 0;
+ buffer->last_async_upload_token_ = 0;
+}
} // namespace gles2
} // namespace gpu
diff --git a/gpu/command_buffer/client/buffer_tracker.h b/gpu/command_buffer/client/buffer_tracker.h
index 3e50364..33bd94b 100644
--- a/gpu/command_buffer/client/buffer_tracker.h
+++ b/gpu/command_buffer/client/buffer_tracker.h
@@ -35,7 +35,8 @@ class GLES2_IMPL_EXPORT BufferTracker {
shm_offset_(shm_offset),
address_(address),
mapped_(false),
- transfer_ready_token_(0) {
+ last_usage_token_(0),
+ last_async_upload_token_(0) {
}
GLenum id() const {
@@ -66,12 +67,20 @@ class GLES2_IMPL_EXPORT BufferTracker {
return mapped_;
}
- void set_transfer_ready_token(int token) {
- transfer_ready_token_ = token;
+ void set_last_usage_token(int token) {
+ last_usage_token_ = token;
}
- uint32 transfer_ready_token() const {
- return transfer_ready_token_;
+ int last_usage_token() const {
+ return last_usage_token_;
+ }
+
+ void set_last_async_upload_token(uint32 async_token) {
+ last_async_upload_token_ = async_token;
+ }
+
+ GLuint last_async_upload_token() const {
+ return last_async_upload_token_;
}
private:
@@ -84,7 +93,8 @@ class GLES2_IMPL_EXPORT BufferTracker {
uint32 shm_offset_;
void* address_;
bool mapped_;
- int32 transfer_ready_token_;
+ int32 last_usage_token_;
+ GLuint last_async_upload_token_;
};
BufferTracker(MappedMemoryManager* manager);
@@ -96,7 +106,9 @@ class GLES2_IMPL_EXPORT BufferTracker {
// Frees the block of memory associated with buffer, pending the passage
// of a token.
- void FreePendingToken(Buffer*, int32 token);
+ void FreePendingToken(Buffer* buffer, int32 token);
+ void Unmanage(Buffer* buffer);
+ void Free(Buffer* buffer);
private:
typedef base::hash_map<GLuint, Buffer*> BufferMap;
diff --git a/gpu/command_buffer/client/buffer_tracker_unittest.cc b/gpu/command_buffer/client/buffer_tracker_unittest.cc
index a298844..f6174c0 100644
--- a/gpu/command_buffer/client/buffer_tracker_unittest.cc
+++ b/gpu/command_buffer/client/buffer_tracker_unittest.cc
@@ -42,6 +42,11 @@ class MockClientCommandBufferImpl : public MockClientCommandBuffer {
bool context_lost_;
};
+namespace {
+void EmptyPoll() {
+}
+}
+
class BufferTrackerTest : public testing::Test {
protected:
static const int32 kNumCommandEntries = 400;
@@ -53,7 +58,7 @@ class BufferTrackerTest : public testing::Test {
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
mapped_memory_.reset(new MappedMemoryManager(
- helper_.get(), MappedMemoryManager::kNoLimit));
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
}
@@ -127,5 +132,22 @@ TEST_F(BufferTrackerTest, LostContext) {
buffer_tracker_->RemoveBuffer(kId);
}
+TEST_F(BufferTrackerTest, Unmanage) {
+ const GLuint kId = 123;
+ const GLsizeiptr size = 64;
+
+ BufferTracker::Buffer* buffer = buffer_tracker_->CreateBuffer(kId, size);
+ ASSERT_TRUE(buffer != NULL);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ void* mem = buffer->address();
+ buffer_tracker_->Unmanage(buffer);
+ buffer_tracker_->RemoveBuffer(kId);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(size));
+
+ mapped_memory_->Free(mem);
+ EXPECT_EQ(mapped_memory_->bytes_in_use(), static_cast<size_t>(0));
+}
+
} // namespace gles2
} // namespace gpu
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
index 49230b4..a2de5ac 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -84,6 +84,15 @@ class GPU_EXPORT CommandBufferHelper {
// shutdown.
int32 InsertToken();
+ // Returns true if the token has passed.
+ // Parameters:
+ // the value of the token to check whether it has passed
+ bool HasTokenPassed(int32 token) const {
+ if (token > token_)
+ return true; // we wrapped
+ return last_token_read() >= token;
+ }
+
// Waits until the token of a particular value has passed through the command
// stream (i.e. commands inserted before that token have been executed).
// NOTE: This will call Flush if it needs to block.
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
index 0e90bf3..4e405e9 100644
--- a/gpu/command_buffer/client/fenced_allocator.cc
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -34,8 +34,10 @@ const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif
FencedAllocator::FencedAllocator(unsigned int size,
- CommandBufferHelper *helper)
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
: helper_(helper),
+ poll_callback_(poll_callback),
bytes_in_use_(0) {
Block block = { FREE, 0, RoundDown(size), kUnusedToken };
blocks_.push_back(block);
@@ -203,10 +205,13 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
- int32 last_token_read = helper_->last_token_read();
+ // Free any potential blocks that has its lifetime handled outside.
+ poll_callback_.Run();
+
for (unsigned int i = 0; i < blocks_.size();) {
Block& block = blocks_[i];
- if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
+ if (block.state == FREE_PENDING_TOKEN &&
+ helper_->HasTokenPassed(block.token)) {
block.state = FREE;
i = CollapseFreeBlock(i);
} else {
diff --git a/gpu/command_buffer/client/fenced_allocator.h b/gpu/command_buffer/client/fenced_allocator.h
index bb5c551..77fadc3 100644
--- a/gpu/command_buffer/client/fenced_allocator.h
+++ b/gpu/command_buffer/client/fenced_allocator.h
@@ -9,6 +9,7 @@
#include <vector>
+#include "base/bind.h"
#include "base/logging.h"
#include "gpu/command_buffer/common/types.h"
#include "gpu/gpu_export.h"
@@ -35,7 +36,8 @@ class GPU_EXPORT FencedAllocator {
// Creates a FencedAllocator. Note that the size of the buffer is passed, but
// not its base address: everything is handled as offsets into the buffer.
FencedAllocator(unsigned int size,
- CommandBufferHelper *helper);
+ CommandBufferHelper *helper,
+ const base::Closure& poll_callback);
~FencedAllocator();
@@ -136,6 +138,7 @@ class GPU_EXPORT FencedAllocator {
Offset AllocInBlock(BlockIndex index, unsigned int size);
CommandBufferHelper *helper_;
+ base::Closure poll_callback_;
Container blocks_;
size_t bytes_in_use_;
@@ -148,8 +151,9 @@ class FencedAllocatorWrapper {
public:
FencedAllocatorWrapper(unsigned int size,
CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
void* base)
- : allocator_(size, helper),
+ : allocator_(size, helper, poll_callback),
base_(base) { }
// Allocates a block of memory. If the buffer is out of directly available
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
index f213535..2db1328 100644
--- a/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -29,6 +29,7 @@ using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
+using testing::InvokeWithoutArgs;
using testing::_;
class BaseFencedAllocatorTest : public testing::Test {
@@ -88,6 +89,11 @@ class BaseFencedAllocatorTest : public testing::Test {
const unsigned int BaseFencedAllocatorTest::kBufferSize;
#endif
+namespace {
+void EmptyPoll() {
+}
+}
+
// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
@@ -96,7 +102,9 @@ class FencedAllocatorTest : public BaseFencedAllocatorTest {
protected:
virtual void SetUp() {
BaseFencedAllocatorTest::SetUp();
- allocator_.reset(new FencedAllocator(kBufferSize, helper_.get()));
+ allocator_.reset(new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
}
virtual void TearDown() {
@@ -391,6 +399,63 @@ TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}
+class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
+ public:
+ static const unsigned int kAllocSize = 128;
+
+ MOCK_METHOD0(MockedPoll, void());
+
+ protected:
+ virtual void TearDown() {
+ // If the GpuScheduler posts any tasks, this forces them to run.
+ base::MessageLoop::current()->RunUntilIdle();
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+};
+
+TEST_F(FencedAllocatorPollTest, TestPoll) {
+ scoped_ptr<FencedAllocator> allocator(
+ new FencedAllocator(kBufferSize,
+ helper_.get(),
+ base::Bind(&FencedAllocatorPollTest::MockedPoll,
+ base::Unretained(this))));
+
+ FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
+ FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
+ EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
+ EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that no-op Poll doesn't affect the state.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);
+
+ // Check that freeing in Poll works.
+ base::Closure free_mem1_closure =
+ base::Bind(&FencedAllocator::Free,
+ base::Unretained(allocator.get()),
+ mem1);
+ EXPECT_CALL(*this, MockedPoll())
+ .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
+ .RetiresOnSaturation();
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);
+
+ // Check that freeing still works.
+ EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
+ allocator->Free(mem2);
+ allocator->FreeUnused();
+ EXPECT_TRUE(allocator->CheckConsistency());
+ EXPECT_EQ(allocator->bytes_in_use(), 0u);
+
+ allocator.reset();
+}
+
// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
@@ -406,7 +471,9 @@ class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
// something.
buffer_.reset(static_cast<char*>(base::AlignedAlloc(
kBufferSize, kAllocAlignment)));
- allocator_.reset(new FencedAllocatorWrapper(kBufferSize, helper_.get(),
+ allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
+ helper_.get(),
+ base::Bind(&EmptyPoll),
buffer_.get()));
}
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
index ee380a6..6aab61e 100644
--- a/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -881,6 +881,9 @@ void GLES2AsyncTexImage2DCHROMIUM(GLenum target,
void GLES2WaitAsyncTexImage2DCHROMIUM(GLenum target) {
gles2::GetGLContext()->WaitAsyncTexImage2DCHROMIUM(target);
}
+void GLES2WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::GetGLContext()->WaitAllAsyncTexImage2DCHROMIUM();
+}
void GLES2DiscardFramebufferEXT(GLenum target,
GLsizei count,
const GLenum* attachments) {
@@ -1259,6 +1262,9 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glAsyncTexImage2DCHROMIUM), },
{"glWaitAsyncTexImage2DCHROMIUM",
reinterpret_cast<GLES2FunctionPointer>(glWaitAsyncTexImage2DCHROMIUM), },
+ {"glWaitAllAsyncTexImage2DCHROMIUM",
+ reinterpret_cast<GLES2FunctionPointer>(
+ glWaitAllAsyncTexImage2DCHROMIUM), },
{"glDiscardFramebufferEXT",
reinterpret_cast<GLES2FunctionPointer>(glDiscardFramebufferEXT), },
{"glLoseContextCHROMIUM",
diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index ffe9e54..8150d64 100644
--- a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -2097,7 +2097,10 @@ void AsyncTexSubImage2DCHROMIUM(GLenum target,
GLenum format,
GLenum type,
uint32 data_shm_id,
- uint32 data_shm_offset) {
+ uint32 data_shm_offset,
+ uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset) {
gles2::cmds::AsyncTexSubImage2DCHROMIUM* c =
GetCmdSpace<gles2::cmds::AsyncTexSubImage2DCHROMIUM>();
if (c) {
@@ -2110,7 +2113,10 @@ void AsyncTexSubImage2DCHROMIUM(GLenum target,
format,
type,
data_shm_id,
- data_shm_offset);
+ data_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
}
}
@@ -2123,7 +2129,10 @@ void AsyncTexImage2DCHROMIUM(GLenum target,
GLenum format,
GLenum type,
uint32 pixels_shm_id,
- uint32 pixels_shm_offset) {
+ uint32 pixels_shm_offset,
+ uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset) {
gles2::cmds::AsyncTexImage2DCHROMIUM* c =
GetCmdSpace<gles2::cmds::AsyncTexImage2DCHROMIUM>();
if (c) {
@@ -2136,7 +2145,10 @@ void AsyncTexImage2DCHROMIUM(GLenum target,
format,
type,
pixels_shm_id,
- pixels_shm_offset);
+ pixels_shm_offset,
+ async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
}
}
@@ -2148,6 +2160,14 @@ void WaitAsyncTexImage2DCHROMIUM(GLenum target) {
}
}
+void WaitAllAsyncTexImage2DCHROMIUM() {
+ gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM* c =
+ GetCmdSpace<gles2::cmds::WaitAllAsyncTexImage2DCHROMIUM>();
+ if (c) {
+ c->Init();
+ }
+}
+
void DiscardFramebufferEXT(GLenum target,
GLsizei count,
uint32 attachments_shm_id,
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index 147714a..0dd5161 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -15,6 +15,7 @@
#include <string>
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>
+#include "base/bind.h"
#include "gpu/command_buffer/client/buffer_tracker.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
#include "gpu/command_buffer/client/program_info_manager.h"
@@ -106,6 +107,10 @@ GLES2Implementation::GLES2Implementation(
bound_array_buffer_id_(0),
bound_pixel_pack_transfer_buffer_id_(0),
bound_pixel_unpack_transfer_buffer_id_(0),
+ async_upload_token_(0),
+ async_upload_sync_(NULL),
+ async_upload_sync_shm_id_(0),
+ async_upload_sync_shm_offset_(0),
error_bits_(0),
debug_(false),
use_count_(0),
@@ -151,7 +156,15 @@ bool GLES2Implementation::Initialize(
return false;
}
- mapped_memory_.reset(new MappedMemoryManager(helper_, mapped_memory_limit));
+ mapped_memory_.reset(
+ new MappedMemoryManager(
+ helper_,
+ base::Bind(&GLES2Implementation::PollAsyncUploads,
+ // The mapped memory manager is owned by |this| here, and
+ // since its destroyed before before we destroy ourselves
+ // we don't need extra safety measures for this closure.
+ base::Unretained(this)),
+ mapped_memory_limit));
unsigned chunk_size = 2 * 1024 * 1024;
if (mapped_memory_limit != kNoLimit) {
@@ -278,6 +291,13 @@ GLES2Implementation::~GLES2Implementation() {
buffer_tracker_.reset();
+ FreeAllAsyncUploadBuffers();
+
+ if (async_upload_sync_) {
+ mapped_memory_->Free(async_upload_sync_);
+ async_upload_sync_ = NULL;
+ }
+
// Make sure the commands make it the service.
WaitForCmd();
}
@@ -307,6 +327,7 @@ void GLES2Implementation::FreeUnusedSharedMemory() {
}
void GLES2Implementation::FreeEverything() {
+ FreeAllAsyncUploadBuffers();
WaitForCmd();
query_tracker_->Shrink();
FreeUnusedSharedMemory();
@@ -1364,13 +1385,8 @@ void GLES2Implementation::BufferDataHelper(
}
BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffer_id);
- if (buffer) {
- // Free buffer memory, pending the passage of a token.
- buffer_tracker_->FreePendingToken(buffer, helper_->InsertToken());
-
- // Remove old buffer.
- buffer_tracker_->RemoveBuffer(buffer_id);
- }
+ if (buffer)
+ RemoveTransferBuffer(buffer);
// Create new buffer.
buffer = buffer_tracker_->CreateBuffer(buffer_id, size);
@@ -1498,6 +1514,30 @@ void GLES2Implementation::BufferSubData(
CheckGLError();
}
+void GLES2Implementation::RemoveTransferBuffer(BufferTracker::Buffer* buffer) {
+ int32 token = buffer->last_usage_token();
+ uint32 async_token = buffer->last_async_upload_token();
+
+ if (async_token) {
+ if (HasAsyncUploadTokenPassed(async_token)) {
+ buffer_tracker_->Free(buffer);
+ } else {
+ detached_async_upload_memory_.push_back(
+ std::make_pair(buffer->address(), async_token));
+ buffer_tracker_->Unmanage(buffer);
+ }
+ } else if (token) {
+ if (helper_->HasTokenPassed(token))
+ buffer_tracker_->Free(buffer);
+ else
+ buffer_tracker_->FreePendingToken(buffer, token);
+ } else {
+ buffer_tracker_->Free(buffer);
+ }
+
+ buffer_tracker_->RemoveBuffer(buffer->id());
+}
+
bool GLES2Implementation::GetBoundPixelTransferBuffer(
GLenum target,
const char* function_name,
@@ -1573,7 +1613,7 @@ void GLES2Implementation::CompressedTexImage2D(
helper_->CompressedTexImage2D(
target, level, internalformat, width, height, border, image_size,
buffer->shm_id(), buffer->shm_offset() + offset);
- buffer->set_transfer_ready_token(helper_->InsertToken());
+ buffer->set_last_usage_token(helper_->InsertToken());
}
return;
}
@@ -1614,7 +1654,7 @@ void GLES2Implementation::CompressedTexSubImage2D(
helper_->CompressedTexSubImage2D(
target, level, xoffset, yoffset, width, height, format, image_size,
buffer->shm_id(), buffer->shm_offset() + offset);
- buffer->set_transfer_ready_token(helper_->InsertToken());
+ buffer->set_last_usage_token(helper_->InsertToken());
CheckGLError();
}
return;
@@ -1701,7 +1741,7 @@ void GLES2Implementation::TexImage2D(
helper_->TexImage2D(
target, level, internalformat, width, height, border, format, type,
buffer->shm_id(), buffer->shm_offset() + offset);
- buffer->set_transfer_ready_token(helper_->InsertToken());
+ buffer->set_last_usage_token(helper_->InsertToken());
CheckGLError();
}
return;
@@ -1807,7 +1847,7 @@ void GLES2Implementation::TexSubImage2D(
helper_->TexSubImage2D(
target, level, xoffset, yoffset, width, height, format, type,
buffer->shm_id(), buffer->shm_offset() + offset, false);
- buffer->set_transfer_ready_token(helper_->InsertToken());
+ buffer->set_last_usage_token(helper_->InsertToken());
CheckGLError();
}
return;
@@ -2390,24 +2430,24 @@ void GLES2Implementation::GenQueriesEXTHelper(
// deleted the resource.
bool GLES2Implementation::BindBufferHelper(
- GLenum target, GLuint buffer) {
+ GLenum target, GLuint buffer_id) {
// TODO(gman): See note #1 above.
bool changed = false;
switch (target) {
case GL_ARRAY_BUFFER:
- if (bound_array_buffer_id_ != buffer) {
- bound_array_buffer_id_ = buffer;
+ if (bound_array_buffer_id_ != buffer_id) {
+ bound_array_buffer_id_ = buffer_id;
changed = true;
}
break;
case GL_ELEMENT_ARRAY_BUFFER:
- changed = vertex_array_object_manager_->BindElementArray(buffer);
+ changed = vertex_array_object_manager_->BindElementArray(buffer_id);
break;
case GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM:
- bound_pixel_pack_transfer_buffer_id_ = buffer;
+ bound_pixel_pack_transfer_buffer_id_ = buffer_id;
break;
case GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM:
- bound_pixel_unpack_transfer_buffer_id_ = buffer;
+ bound_pixel_unpack_transfer_buffer_id_ = buffer_id;
break;
default:
changed = true;
@@ -2415,7 +2455,7 @@ bool GLES2Implementation::BindBufferHelper(
}
// TODO(gman): There's a bug here. If the target is invalid the ID will not be
// used even though it's marked it as used here.
- GetIdHandler(id_namespaces::kBuffers)->MarkAsUsedForBind(buffer);
+ GetIdHandler(id_namespaces::kBuffers)->MarkAsUsedForBind(buffer_id);
return changed;
}
@@ -2558,13 +2598,11 @@ void GLES2Implementation::DeleteBuffersHelper(
bound_array_buffer_id_ = 0;
}
vertex_array_object_manager_->UnbindBuffer(buffers[ii]);
+
BufferTracker::Buffer* buffer = buffer_tracker_->GetBuffer(buffers[ii]);
- if (buffer) {
- // Free buffer memory, pending the passage of a token.
- buffer_tracker_->FreePendingToken(buffer, helper_->InsertToken());
- // Remove buffer.
- buffer_tracker_->RemoveBuffer(buffers[ii]);
- }
+ if (buffer)
+ RemoveTransferBuffer(buffer);
+
if (buffers[ii] == bound_pixel_unpack_transfer_buffer_id_) {
bound_pixel_unpack_transfer_buffer_id_ = 0;
}
@@ -3616,9 +3654,9 @@ void* GLES2Implementation::MapBufferCHROMIUM(GLuint target, GLenum access) {
// with this method of synchronization. Until this is fixed,
// MapBufferCHROMIUM will not block even if the transfer is not ready
// for these calls.
- if (buffer->transfer_ready_token()) {
- helper_->WaitForToken(buffer->transfer_ready_token());
- buffer->set_transfer_ready_token(0);
+ if (buffer->last_usage_token()) {
+ helper_->WaitForToken(buffer->last_usage_token());
+ buffer->set_last_usage_token(0);
}
buffer->set_mapped(true);
@@ -3652,6 +3690,71 @@ GLboolean GLES2Implementation::UnmapBufferCHROMIUM(GLuint target) {
return true;
}
+bool GLES2Implementation::EnsureAsyncUploadSync() {
+ if (async_upload_sync_)
+ return true;
+
+ int32 shm_id;
+ unsigned int shm_offset;
+ void* mem = mapped_memory_->Alloc(sizeof(AsyncUploadSync),
+ &shm_id,
+ &shm_offset);
+ if (!mem)
+ return false;
+
+ async_upload_sync_shm_id_ = shm_id;
+ async_upload_sync_shm_offset_ = shm_offset;
+ async_upload_sync_ = static_cast<AsyncUploadSync*>(mem);
+ async_upload_sync_->Reset();
+
+ return true;
+}
+
+uint32 GLES2Implementation::NextAsyncUploadToken() {
+ async_upload_token_++;
+ if (async_upload_token_ == 0)
+ async_upload_token_++;
+ return async_upload_token_;
+}
+
+void GLES2Implementation::PollAsyncUploads() {
+ if (!async_upload_sync_)
+ return;
+
+ if (helper_->IsContextLost()) {
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ }
+ return;
+ }
+
+ DetachedAsyncUploadMemoryList::iterator it =
+ detached_async_upload_memory_.begin();
+ while (it != detached_async_upload_memory_.end()) {
+ if (HasAsyncUploadTokenPassed(it->second)) {
+ mapped_memory_->Free(it->first);
+ it = detached_async_upload_memory_.erase(it);
+ } else {
+ break;
+ }
+ }
+}
+
+void GLES2Implementation::FreeAllAsyncUploadBuffers() {
+ // Free all completed unmanaged async uploads buffers.
+ PollAsyncUploads();
+
+ // Synchronously free rest of the unmanaged async upload buffers.
+ if (!detached_async_upload_memory_.empty()) {
+ WaitAllAsyncTexImage2DCHROMIUM();
+ WaitForCmd();
+ PollAsyncUploads();
+ }
+}
+
void GLES2Implementation::AsyncTexImage2DCHROMIUM(
GLenum target, GLint level, GLint internalformat, GLsizei width,
GLsizei height, GLint border, GLenum format, GLenum type,
@@ -3683,7 +3786,12 @@ void GLES2Implementation::AsyncTexImage2DCHROMIUM(
if (!pixels && !bound_pixel_unpack_transfer_buffer_id_) {
helper_->AsyncTexImage2DCHROMIUM(
target, level, internalformat, width, height, border, format, type,
- 0, 0);
+ 0, 0, 0, 0, 0);
+ return;
+ }
+
+ if (!EnsureAsyncUploadSync()) {
+ SetGLError(GL_OUT_OF_MEMORY, "glTexImage2D", "out of memory");
return;
}
@@ -3696,9 +3804,13 @@ void GLES2Implementation::AsyncTexImage2DCHROMIUM(
bound_pixel_unpack_transfer_buffer_id_,
"glAsyncTexImage2DCHROMIUM", offset, size);
if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
helper_->AsyncTexImage2DCHROMIUM(
target, level, internalformat, width, height, border, format, type,
- buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
}
}
@@ -3731,6 +3843,11 @@ void GLES2Implementation::AsyncTexSubImage2DCHROMIUM(
return;
}
+ if (!EnsureAsyncUploadSync()) {
+ SetGLError(GL_OUT_OF_MEMORY, "glTexImage2D", "out of memory");
+ return;
+ }
+
// Async uploads require a transfer buffer to be bound.
// TODO(hubbe): Make MapBufferCHROMIUM block if someone tries to re-use
// the buffer before the transfer is finished. (Currently such
@@ -3740,9 +3857,13 @@ void GLES2Implementation::AsyncTexSubImage2DCHROMIUM(
bound_pixel_unpack_transfer_buffer_id_,
"glAsyncTexSubImage2DCHROMIUM", offset, size);
if (buffer && buffer->shm_id() != -1) {
+ uint32 async_token = NextAsyncUploadToken();
+ buffer->set_last_async_upload_token(async_token);
helper_->AsyncTexSubImage2DCHROMIUM(
target, level, xoffset, yoffset, width, height, format, type,
- buffer->shm_id(), buffer->shm_offset() + offset);
+ buffer->shm_id(), buffer->shm_offset() + offset,
+ async_token,
+ async_upload_sync_shm_id_, async_upload_sync_shm_offset_);
}
}
@@ -3754,6 +3875,14 @@ void GLES2Implementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
CheckGLError();
}
+void GLES2Implementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix()
+ << "] glWaitAllAsyncTexImage2DCHROMIUM()");
+ helper_->WaitAllAsyncTexImage2DCHROMIUM();
+ CheckGLError();
+}
+
GLuint GLES2Implementation::InsertSyncPointCHROMIUM() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glInsertSyncPointCHROMIUM");
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
index bd5ee04..b120a62 100644
--- a/gpu/command_buffer/client/gles2_implementation.h
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -601,6 +601,38 @@ class GLES2_IMPL_EXPORT GLES2Implementation
void OnSwapBuffersComplete();
+ // Remove the transfer buffer from the buffer tracker. For buffers used
+ // asynchronously the memory is freed if the upload has completed. For
+ // other buffers, the memory is either freed immediately or freed pending
+ // a token.
+ void RemoveTransferBuffer(BufferTracker::Buffer* buffer);
+
+ // Returns true if the async upload token has passed.
+ //
+ // NOTE: This will detect wrapped async tokens by checking if the most
+ // significant bit of async token to check is 1 but the last read is 0, i.e.
+ // the uint32 wrapped.
+ bool HasAsyncUploadTokenPassed(uint32 token) const {
+ return async_upload_sync_->HasAsyncUploadTokenPassed(token);
+ }
+
+ // Get the next async upload token.
+ uint32 NextAsyncUploadToken();
+
+ // Ensure that the shared memory used for synchronizing async upload tokens
+ // has been mapped.
+ //
+ // Returns false on error, true on success.
+ bool EnsureAsyncUploadSync();
+
+ // Checks the last read asynchronous upload token and frees any unmanaged
+ // transfer buffer whose async token has passed.
+ void PollAsyncUploads();
+
+ // Free every async upload buffer. If any async upload buffers are still in
+ // use, wait for them to finish before freeing.
+ void FreeAllAsyncUploadBuffers();
+
bool GetBoundPixelTransferBuffer(
GLenum target, const char* function_name, GLuint* buffer_id);
BufferTracker::Buffer* GetBoundPixelUnpackTransferBufferIfValid(
@@ -673,6 +705,18 @@ class GLES2_IMPL_EXPORT GLES2Implementation
GLuint bound_pixel_pack_transfer_buffer_id_;
GLuint bound_pixel_unpack_transfer_buffer_id_;
+ // The current asynchronous pixel buffer upload token.
+ uint32 async_upload_token_;
+
+ // The shared memory used for synchronizing asynchronous upload tokens.
+ AsyncUploadSync* async_upload_sync_;
+ int32 async_upload_sync_shm_id_;
+ unsigned int async_upload_sync_shm_offset_;
+
+ // Unmanaged pixel transfer buffer memory pending asynchronous upload token.
+ typedef std::list<std::pair<void*, uint32> > DetachedAsyncUploadMemoryList;
+ DetachedAsyncUploadMemoryList detached_async_upload_memory_;
+
// Client side management for vertex array objects. Needed to correctly
// track client side arrays.
scoped_ptr<VertexArrayObjectManager> vertex_array_object_manager_;
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
index 7301f50..03ed808 100644
--- a/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -696,6 +696,8 @@ virtual void AsyncTexImage2DCHROMIUM(GLenum target,
virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
+
virtual void DiscardFramebufferEXT(GLenum target,
GLsizei count,
const GLenum* attachments) OVERRIDE;
diff --git a/gpu/command_buffer/client/gles2_interface_autogen.h b/gpu/command_buffer/client/gles2_interface_autogen.h
index 4da19dc..c3c3f90 100644
--- a/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -478,6 +478,7 @@ virtual void AsyncTexImage2DCHROMIUM(GLenum target,
GLenum type,
const void* pixels) = 0;
virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) = 0;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() = 0;
virtual void DiscardFramebufferEXT(GLenum target,
GLsizei count,
const GLenum* attachments) = 0;
diff --git a/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index e1db978..4d7e1f9 100644
--- a/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -502,6 +502,7 @@ virtual void AsyncTexImage2DCHROMIUM(GLenum target,
GLenum type,
const void* pixels) OVERRIDE;
virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
virtual void DiscardFramebufferEXT(GLenum target,
GLsizei count,
const GLenum* attachments) OVERRIDE;
diff --git a/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 61c81ee..639cadd 100644
--- a/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -620,6 +620,7 @@ void GLES2InterfaceStub::AsyncTexImage2DCHROMIUM(GLenum /* target */,
GLenum /* type */,
const void* /* pixels */) {}
void GLES2InterfaceStub::WaitAsyncTexImage2DCHROMIUM(GLenum /* target */) {}
+void GLES2InterfaceStub::WaitAllAsyncTexImage2DCHROMIUM() {}
void GLES2InterfaceStub::DiscardFramebufferEXT(
GLenum /* target */,
GLsizei /* count */,
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 43cc090d..89bb479 100644
--- a/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -502,6 +502,7 @@ virtual void AsyncTexImage2DCHROMIUM(GLenum target,
GLenum type,
const void* pixels) OVERRIDE;
virtual void WaitAsyncTexImage2DCHROMIUM(GLenum target) OVERRIDE;
+virtual void WaitAllAsyncTexImage2DCHROMIUM() OVERRIDE;
virtual void DiscardFramebufferEXT(GLenum target,
GLsizei count,
const GLenum* attachments) OVERRIDE;
diff --git a/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index e915d7b..0734ab1 100644
--- a/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -1441,6 +1441,12 @@ void GLES2TraceImplementation::WaitAsyncTexImage2DCHROMIUM(GLenum target) {
gl_->WaitAsyncTexImage2DCHROMIUM(target);
}
+void GLES2TraceImplementation::WaitAllAsyncTexImage2DCHROMIUM() {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu",
+ "GLES2Trace::WaitAllAsyncTexImage2DCHROMIUM");
+ gl_->WaitAllAsyncTexImage2DCHROMIUM();
+}
+
void GLES2TraceImplementation::DiscardFramebufferEXT(
GLenum target,
GLsizei count,
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
index aeab080..b62ca27 100644
--- a/gpu/command_buffer/client/mapped_memory.cc
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -15,17 +15,20 @@ namespace gpu {
MemoryChunk::MemoryChunk(int32 shm_id,
scoped_refptr<gpu::Buffer> shm,
- CommandBufferHelper* helper)
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback)
: shm_id_(shm_id),
shm_(shm),
- allocator_(shm->size(), helper, shm->memory()) {}
+ allocator_(shm->size(), helper, poll_callback, shm->memory()) {}
MemoryChunk::~MemoryChunk() {}
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
size_t unused_memory_reclaim_limit)
: chunk_size_multiple_(1),
helper_(helper),
+ poll_callback_(poll_callback),
allocated_memory_(0),
max_free_bytes_(unused_memory_reclaim_limit) {
}
@@ -88,7 +91,7 @@ void* MappedMemoryManager::Alloc(
cmd_buf->CreateTransferBuffer(chunk_size, &id);
if (id < 0)
return NULL;
- MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+ MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
allocated_memory_ += mc->GetSize();
chunks_.push_back(mc);
void* mem = mc->Alloc(size);
diff --git a/gpu/command_buffer/client/mapped_memory.h b/gpu/command_buffer/client/mapped_memory.h
index 00251e8..e7f62ff 100644
--- a/gpu/command_buffer/client/mapped_memory.h
+++ b/gpu/command_buffer/client/mapped_memory.h
@@ -5,6 +5,7 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+#include "base/bind.h"
#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
@@ -20,7 +21,8 @@ class GPU_EXPORT MemoryChunk {
public:
MemoryChunk(int32 shm_id,
scoped_refptr<gpu::Buffer> shm,
- CommandBufferHelper* helper);
+ CommandBufferHelper* helper,
+ const base::Closure& poll_callback);
~MemoryChunk();
// Gets the size of the largest free block that is available without waiting.
@@ -121,6 +123,7 @@ class GPU_EXPORT MappedMemoryManager {
// |unused_memory_reclaim_limit|: When exceeded this causes pending memory
// to be reclaimed before allocating more memory.
MappedMemoryManager(CommandBufferHelper* helper,
+ const base::Closure& poll_callback,
size_t unused_memory_reclaim_limit);
~MappedMemoryManager();
@@ -165,6 +168,15 @@ class GPU_EXPORT MappedMemoryManager {
return chunks_.size();
}
+ size_t bytes_in_use() const {
+ size_t bytes_in_use = 0;
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ bytes_in_use += chunk->bytes_in_use();
+ }
+ return bytes_in_use;
+ }
+
// Used for testing
size_t allocated_memory() const {
return allocated_memory_;
@@ -176,6 +188,7 @@ class GPU_EXPORT MappedMemoryManager {
// size a chunk is rounded up to.
unsigned int chunk_size_multiple_;
CommandBufferHelper* helper_;
+ base::Closure poll_callback_;
MemoryChunkVector chunks_;
size_t allocated_memory_;
size_t max_free_bytes_;
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 3e174fa..d853119 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/client/mapped_memory.h"
+#include <list>
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
@@ -85,6 +86,11 @@ class MappedMemoryTestBase : public testing::Test {
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif
+namespace {
+void EmptyPoll() {
+}
+}
+
// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
@@ -97,7 +103,10 @@ class MemoryChunkTest : public MappedMemoryTestBase {
scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
shared_memory->CreateAndMapAnonymous(kBufferSize);
buffer_ = new gpu::Buffer(shared_memory.Pass(), kBufferSize);
- chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get()));
+ chunk_.reset(new MemoryChunk(kShmId,
+ buffer_,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
}
virtual void TearDown() {
@@ -148,11 +157,16 @@ TEST_F(MemoryChunkTest, Basic) {
}
class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ public:
+ MappedMemoryManager* manager() const {
+ return manager_.get();
+ }
+
protected:
virtual void SetUp() {
MappedMemoryTestBase::SetUp();
manager_.reset(new MappedMemoryManager(
- helper_.get(), MappedMemoryManager::kNoLimit));
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
}
virtual void TearDown() {
@@ -312,7 +326,8 @@ TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
const unsigned int kChunkSize = 2048;
// Reset the manager with a memory limit.
- manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
manager_->set_chunk_size_multiple(kChunkSize);
// Allocate one chunk worth of memory.
@@ -340,7 +355,8 @@ TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
const unsigned int kSize = 1024;
// Reset the manager with a memory limit.
- manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kSize));
const unsigned int kChunkSize = 2 * 1024;
manager_->set_chunk_size_multiple(kChunkSize);
@@ -386,4 +402,55 @@ TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
}
+namespace {
+void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
+ std::list<void*>::iterator it = list->begin();
+ while (it != list->end()) {
+ void* address = *it;
+ test->manager()->Free(address);
+ it = list->erase(it);
+ }
+}
+}
+
+TEST_F(MappedMemoryManagerTest, Poll) {
+ std::list<void*> unmanaged_memory_list;
+
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(),
+ base::Bind(&Poll, this, &unmanaged_memory_list),
+ kSize));
+
+ // Allocate kSize bytes. Don't add the address to
+ // the unmanaged memory list, so that it won't be freed just yet.
+ int32 id1;
+ unsigned int offset1;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize);
+
+ // Allocate kSize more bytes, and make sure we grew.
+ int32 id2;
+ unsigned int offset2;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ // Make the unmanaged buffer be released next time FreeUnused() is called
+ // in MappedMemoryManager/FencedAllocator. This happens for example when
+ // allocating new memory.
+ unmanaged_memory_list.push_back(mem1);
+
+ // Allocate kSize more bytes. This should poll unmanaged memory, which now
+ // should free the previously allocated unmanaged memory.
+ int32 id3;
+ unsigned int offset3;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+ EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/client/query_tracker_unittest.cc b/gpu/command_buffer/client/query_tracker_unittest.cc
index ce299f0..0820a99 100644
--- a/gpu/command_buffer/client/query_tracker_unittest.cc
+++ b/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -18,6 +18,11 @@
namespace gpu {
namespace gles2 {
+namespace {
+void EmptyPoll() {
+}
+}
+
class QuerySyncManagerTest : public testing::Test {
protected:
static const int32 kNumCommandEntries = 400;
@@ -29,7 +34,7 @@ class QuerySyncManagerTest : public testing::Test {
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
mapped_memory_.reset(new MappedMemoryManager(
- helper_.get(), MappedMemoryManager::kNoLimit));
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
sync_manager_.reset(new QuerySyncManager(mapped_memory_.get()));
}
@@ -83,7 +88,7 @@ class QueryTrackerTest : public testing::Test {
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
mapped_memory_.reset(new MappedMemoryManager(
- helper_.get(), MappedMemoryManager::kNoLimit));
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
}
diff --git a/gpu/command_buffer/cmd_buffer_functions.txt b/gpu/command_buffer/cmd_buffer_functions.txt
index 0aba02e..73026fa 100644
--- a/gpu/command_buffer/cmd_buffer_functions.txt
+++ b/gpu/command_buffer/cmd_buffer_functions.txt
@@ -213,6 +213,7 @@ GL_APICALL void GL_APIENTRY glTraceEndCHROMIUM (void);
GL_APICALL void GL_APIENTRY glAsyncTexSubImage2DCHROMIUM (GLenumTextureTarget target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenumTextureFormat format, GLenumPixelType type, const void* data);
GL_APICALL void GL_APIENTRY glAsyncTexImage2DCHROMIUM (GLenumTextureTarget target, GLint level, GLintTextureInternalFormat internalformat, GLsizei width, GLsizei height, GLintTextureBorder border, GLenumTextureFormat format, GLenumPixelType type, const void* pixels);
GL_APICALL void GL_APIENTRY glWaitAsyncTexImage2DCHROMIUM (GLenumTextureTarget target);
+GL_APICALL void GL_APIENTRY glWaitAllAsyncTexImage2DCHROMIUM (void);
GL_APICALL void GL_APIENTRY glDiscardFramebufferEXT (GLenum target, GLsizei count, const GLenum* attachments);
GL_APICALL void GL_APIENTRY glLoseContextCHROMIUM (GLenum current, GLenum other);
GL_APICALL GLuint GL_APIENTRY glInsertSyncPointCHROMIUM (void);
diff --git a/gpu/command_buffer/common/gles2_cmd_format.h b/gpu/command_buffer/common/gles2_cmd_format.h
index 736c15b..f11b1e9 100644
--- a/gpu/command_buffer/common/gles2_cmd_format.h
+++ b/gpu/command_buffer/common/gles2_cmd_format.h
@@ -152,6 +152,25 @@ struct QuerySync {
uint64 result;
};
+struct AsyncUploadSync {
+ void Reset() {
+ base::subtle::Release_Store(&async_upload_token, 0);
+ }
+
+ void SetAsyncUploadToken(uint32 token) {
+ DCHECK_NE(token, 0u);
+ base::subtle::Release_Store(&async_upload_token, token);
+ }
+
+ bool HasAsyncUploadTokenPassed(uint32 token) {
+ DCHECK_NE(token, 0u);
+ uint32_t current_token = base::subtle::Acquire_Load(&async_upload_token);
+ return (current_token - token < 0x80000000);
+ }
+
+ base::subtle::Atomic32 async_upload_token;
+};
+
COMPILE_ASSERT(sizeof(ProgramInput) == 20, ProgramInput_size_not_20);
COMPILE_ASSERT(offsetof(ProgramInput, type) == 0,
OffsetOf_ProgramInput_type_not_0);
diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index c678d24..49888b3 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -10102,7 +10102,10 @@ struct AsyncTexSubImage2DCHROMIUM {
GLenum _format,
GLenum _type,
uint32 _data_shm_id,
- uint32 _data_shm_offset) {
+ uint32 _data_shm_offset,
+ uint32 _async_upload_token,
+ uint32 _sync_data_shm_id,
+ uint32 _sync_data_shm_offset) {
SetHeader();
target = _target;
level = _level;
@@ -10114,6 +10117,9 @@ struct AsyncTexSubImage2DCHROMIUM {
type = _type;
data_shm_id = _data_shm_id;
data_shm_offset = _data_shm_offset;
+ async_upload_token = _async_upload_token;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
}
void* Set(void* cmd,
@@ -10126,7 +10132,10 @@ struct AsyncTexSubImage2DCHROMIUM {
GLenum _format,
GLenum _type,
uint32 _data_shm_id,
- uint32 _data_shm_offset) {
+ uint32 _data_shm_offset,
+ uint32 _async_upload_token,
+ uint32 _sync_data_shm_id,
+ uint32 _sync_data_shm_offset) {
static_cast<ValueType*>(cmd)->Init(_target,
_level,
_xoffset,
@@ -10136,7 +10145,10 @@ struct AsyncTexSubImage2DCHROMIUM {
_format,
_type,
_data_shm_id,
- _data_shm_offset);
+ _data_shm_offset,
+ _async_upload_token,
+ _sync_data_shm_id,
+ _sync_data_shm_offset);
return NextCmdAddress<ValueType>(cmd);
}
@@ -10151,10 +10163,13 @@ struct AsyncTexSubImage2DCHROMIUM {
uint32 type;
uint32 data_shm_id;
uint32 data_shm_offset;
+ uint32 async_upload_token;
+ uint32 sync_data_shm_id;
+ uint32 sync_data_shm_offset;
};
-COMPILE_ASSERT(sizeof(AsyncTexSubImage2DCHROMIUM) == 44,
- Sizeof_AsyncTexSubImage2DCHROMIUM_is_not_44);
+COMPILE_ASSERT(sizeof(AsyncTexSubImage2DCHROMIUM) == 56,
+ Sizeof_AsyncTexSubImage2DCHROMIUM_is_not_56);
COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, header) == 0,
OffsetOf_AsyncTexSubImage2DCHROMIUM_header_not_0);
COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, target) == 4,
@@ -10177,6 +10192,12 @@ COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, data_shm_id) == 36,
OffsetOf_AsyncTexSubImage2DCHROMIUM_data_shm_id_not_36);
COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, data_shm_offset) == 40,
OffsetOf_AsyncTexSubImage2DCHROMIUM_data_shm_offset_not_40);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, async_upload_token) == 44,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_async_upload_token_not_44);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, sync_data_shm_id) == 48,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_sync_data_shm_id_not_48);
+COMPILE_ASSERT(offsetof(AsyncTexSubImage2DCHROMIUM, sync_data_shm_offset) == 52,
+ OffsetOf_AsyncTexSubImage2DCHROMIUM_sync_data_shm_offset_not_52);
struct AsyncTexImage2DCHROMIUM {
typedef AsyncTexImage2DCHROMIUM ValueType;
@@ -10199,7 +10220,10 @@ struct AsyncTexImage2DCHROMIUM {
GLenum _format,
GLenum _type,
uint32 _pixels_shm_id,
- uint32 _pixels_shm_offset) {
+ uint32 _pixels_shm_offset,
+ uint32 _async_upload_token,
+ uint32 _sync_data_shm_id,
+ uint32 _sync_data_shm_offset) {
SetHeader();
target = _target;
level = _level;
@@ -10211,6 +10235,9 @@ struct AsyncTexImage2DCHROMIUM {
type = _type;
pixels_shm_id = _pixels_shm_id;
pixels_shm_offset = _pixels_shm_offset;
+ async_upload_token = _async_upload_token;
+ sync_data_shm_id = _sync_data_shm_id;
+ sync_data_shm_offset = _sync_data_shm_offset;
}
void* Set(void* cmd,
@@ -10223,7 +10250,10 @@ struct AsyncTexImage2DCHROMIUM {
GLenum _format,
GLenum _type,
uint32 _pixels_shm_id,
- uint32 _pixels_shm_offset) {
+ uint32 _pixels_shm_offset,
+ uint32 _async_upload_token,
+ uint32 _sync_data_shm_id,
+ uint32 _sync_data_shm_offset) {
static_cast<ValueType*>(cmd)->Init(_target,
_level,
_internalformat,
@@ -10233,7 +10263,10 @@ struct AsyncTexImage2DCHROMIUM {
_format,
_type,
_pixels_shm_id,
- _pixels_shm_offset);
+ _pixels_shm_offset,
+ _async_upload_token,
+ _sync_data_shm_id,
+ _sync_data_shm_offset);
return NextCmdAddress<ValueType>(cmd);
}
@@ -10248,10 +10281,13 @@ struct AsyncTexImage2DCHROMIUM {
uint32 type;
uint32 pixels_shm_id;
uint32 pixels_shm_offset;
+ uint32 async_upload_token;
+ uint32 sync_data_shm_id;
+ uint32 sync_data_shm_offset;
};
-COMPILE_ASSERT(sizeof(AsyncTexImage2DCHROMIUM) == 44,
- Sizeof_AsyncTexImage2DCHROMIUM_is_not_44);
+COMPILE_ASSERT(sizeof(AsyncTexImage2DCHROMIUM) == 56,
+ Sizeof_AsyncTexImage2DCHROMIUM_is_not_56);
COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, header) == 0,
OffsetOf_AsyncTexImage2DCHROMIUM_header_not_0);
COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, target) == 4,
@@ -10274,6 +10310,12 @@ COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, pixels_shm_id) == 36,
OffsetOf_AsyncTexImage2DCHROMIUM_pixels_shm_id_not_36);
COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, pixels_shm_offset) == 40,
OffsetOf_AsyncTexImage2DCHROMIUM_pixels_shm_offset_not_40);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, async_upload_token) == 44,
+ OffsetOf_AsyncTexImage2DCHROMIUM_async_upload_token_not_44);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, sync_data_shm_id) == 48,
+ OffsetOf_AsyncTexImage2DCHROMIUM_sync_data_shm_id_not_48);
+COMPILE_ASSERT(offsetof(AsyncTexImage2DCHROMIUM, sync_data_shm_offset) == 52,
+ OffsetOf_AsyncTexImage2DCHROMIUM_sync_data_shm_offset_not_52);
struct WaitAsyncTexImage2DCHROMIUM {
typedef WaitAsyncTexImage2DCHROMIUM ValueType;
@@ -10308,6 +10350,33 @@ COMPILE_ASSERT(offsetof(WaitAsyncTexImage2DCHROMIUM, header) == 0,
COMPILE_ASSERT(offsetof(WaitAsyncTexImage2DCHROMIUM, target) == 4,
OffsetOf_WaitAsyncTexImage2DCHROMIUM_target_not_4);
+struct WaitAllAsyncTexImage2DCHROMIUM {
+ typedef WaitAllAsyncTexImage2DCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitAllAsyncTexImage2DCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8 cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init() { SetHeader(); }
+
+ void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(WaitAllAsyncTexImage2DCHROMIUM) == 4,
+ Sizeof_WaitAllAsyncTexImage2DCHROMIUM_is_not_4);
+COMPILE_ASSERT(offsetof(WaitAllAsyncTexImage2DCHROMIUM, header) == 0,
+ OffsetOf_WaitAllAsyncTexImage2DCHROMIUM_header_not_0);
+
struct DiscardFramebufferEXT {
typedef DiscardFramebufferEXT ValueType;
static const CommandId kCmdId = kDiscardFramebufferEXT;
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test.cc b/gpu/command_buffer/common/gles2_cmd_format_test.cc
index 396ccb3..717e6fb 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_test.cc
+++ b/gpu/command_buffer/common/gles2_cmd_format_test.cc
@@ -4,6 +4,11 @@
// This file contains unit tests for gles2 commmands
+#include <limits>
+
+#include "base/bind.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "gpu/command_buffer/common/gles2_cmd_format.h"
@@ -46,6 +51,68 @@ class GLES2FormatTest : public testing::Test {
unsigned char buffer_[1024];
};
+void SignalCompletion(uint32* assigned_async_token_ptr,
+ uint32 async_token,
+ AsyncUploadSync* sync) {
+ EXPECT_EQ(async_token, *assigned_async_token_ptr);
+ sync->SetAsyncUploadToken(async_token);
+}
+
+TEST(GLES2FormatAsyncUploadSyncTest, AsyncUploadSync) {
+ const size_t kSize = 10;
+ const size_t kCount = 1000;
+
+ base::Thread thread("GLES2FormatUploadSyncTest - Fake Upload Thread");
+ thread.Start();
+
+ // Run the same test 50 times so we retest the wrap as well.
+ for (size_t test_run = 0; test_run < 50; ++test_run) {
+ AsyncUploadSync sync;
+ sync.Reset();
+
+ uint32 buffer_tokens[kSize];
+ memset(buffer_tokens, 0, sizeof(buffer_tokens));
+
+ // Start with a token large enough so that we'll wrap.
+ uint32 async_token = std::numeric_limits<uint32>::max() - kCount / 2;
+
+ // Set initial async token.
+ sync.SetAsyncUploadToken(async_token);
+
+ for (size_t i = 0; i < kCount; ++i) {
+ size_t buffer = i % kSize;
+
+ // Loop until previous async token has passed if any was set.
+ while (buffer_tokens[buffer] &&
+ !sync.HasAsyncUploadTokenPassed(buffer_tokens[buffer]))
+ base::PlatformThread::YieldCurrentThread();
+
+ // Next token, skip 0.
+ async_token++;
+ if (async_token == 0)
+ async_token++;
+
+ // Set the buffer's associated token.
+ buffer_tokens[buffer] = async_token;
+
+ // Set the async upload token on the fake upload thread and assert that
+ // the associated buffer still has the given token.
+ thread.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&SignalCompletion,
+ &buffer_tokens[buffer],
+ async_token,
+ &sync));
+ }
+
+ // Flush the thread message loop before starting again.
+ base::WaitableEvent waitable(false, false);
+ thread.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&base::WaitableEvent::Signal,
+ base::Unretained(&waitable)));
+ waitable.Wait();
+ }
+}
+
// GCC requires these declarations, but MSVC requires they not be present
#ifndef _MSC_VER
const unsigned char GLES2FormatTest::kInitialValue;
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 6e5594b..fe48327 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -3638,7 +3638,10 @@ TEST_F(GLES2FormatTest, AsyncTexSubImage2DCHROMIUM) {
static_cast<GLenum>(17),
static_cast<GLenum>(18),
static_cast<uint32>(19),
- static_cast<uint32>(20));
+ static_cast<uint32>(20),
+ static_cast<uint32>(21),
+ static_cast<uint32>(22),
+ static_cast<uint32>(23));
EXPECT_EQ(static_cast<uint32>(cmds::AsyncTexSubImage2DCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
@@ -3652,6 +3655,9 @@ TEST_F(GLES2FormatTest, AsyncTexSubImage2DCHROMIUM) {
EXPECT_EQ(static_cast<GLenum>(18), cmd.type);
EXPECT_EQ(static_cast<uint32>(19), cmd.data_shm_id);
EXPECT_EQ(static_cast<uint32>(20), cmd.data_shm_offset);
+ EXPECT_EQ(static_cast<uint32>(21), cmd.async_upload_token);
+ EXPECT_EQ(static_cast<uint32>(22), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32>(23), cmd.sync_data_shm_offset);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -3668,7 +3674,10 @@ TEST_F(GLES2FormatTest, AsyncTexImage2DCHROMIUM) {
static_cast<GLenum>(17),
static_cast<GLenum>(18),
static_cast<uint32>(19),
- static_cast<uint32>(20));
+ static_cast<uint32>(20),
+ static_cast<uint32>(21),
+ static_cast<uint32>(22),
+ static_cast<uint32>(23));
EXPECT_EQ(static_cast<uint32>(cmds::AsyncTexImage2DCHROMIUM::kCmdId),
cmd.header.command);
EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
@@ -3682,6 +3691,9 @@ TEST_F(GLES2FormatTest, AsyncTexImage2DCHROMIUM) {
EXPECT_EQ(static_cast<GLenum>(18), cmd.type);
EXPECT_EQ(static_cast<uint32>(19), cmd.pixels_shm_id);
EXPECT_EQ(static_cast<uint32>(20), cmd.pixels_shm_offset);
+ EXPECT_EQ(static_cast<uint32>(21), cmd.async_upload_token);
+ EXPECT_EQ(static_cast<uint32>(22), cmd.sync_data_shm_id);
+ EXPECT_EQ(static_cast<uint32>(23), cmd.sync_data_shm_offset);
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
@@ -3696,6 +3708,16 @@ TEST_F(GLES2FormatTest, WaitAsyncTexImage2DCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, WaitAllAsyncTexImage2DCHROMIUM) {
+ cmds::WaitAllAsyncTexImage2DCHROMIUM& cmd =
+ *GetBufferAs<cmds::WaitAllAsyncTexImage2DCHROMIUM>();
+ void* next_cmd = cmd.Set(&cmd);
+ EXPECT_EQ(static_cast<uint32>(cmds::WaitAllAsyncTexImage2DCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, DiscardFramebufferEXT) {
cmds::DiscardFramebufferEXT& cmd =
*GetBufferAs<cmds::DiscardFramebufferEXT>();
diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index ed69305..f63df0e 100644
--- a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -237,14 +237,15 @@
OP(AsyncTexSubImage2DCHROMIUM) /* 478 */ \
OP(AsyncTexImage2DCHROMIUM) /* 479 */ \
OP(WaitAsyncTexImage2DCHROMIUM) /* 480 */ \
- OP(DiscardFramebufferEXT) /* 481 */ \
- OP(DiscardFramebufferEXTImmediate) /* 482 */ \
- OP(LoseContextCHROMIUM) /* 483 */ \
- OP(InsertSyncPointCHROMIUM) /* 484 */ \
- OP(WaitSyncPointCHROMIUM) /* 485 */ \
- OP(DrawBuffersEXT) /* 486 */ \
- OP(DrawBuffersEXTImmediate) /* 487 */ \
- OP(DiscardBackbufferCHROMIUM) /* 488 */
+ OP(WaitAllAsyncTexImage2DCHROMIUM) /* 481 */ \
+ OP(DiscardFramebufferEXT) /* 482 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 483 */ \
+ OP(LoseContextCHROMIUM) /* 484 */ \
+ OP(InsertSyncPointCHROMIUM) /* 485 */ \
+ OP(WaitSyncPointCHROMIUM) /* 486 */ \
+ OP(DrawBuffersEXT) /* 487 */ \
+ OP(DrawBuffersEXTImmediate) /* 488 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 489 */
enum CommandId {
kStartPoint = cmd::kLastCommonId, // All GLES2 commands start after this.
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager.h b/gpu/command_buffer/service/async_pixel_transfer_manager.h
index 4b57619..1a818f3 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager.h
@@ -78,6 +78,9 @@ class GPU_EXPORT AsyncPixelTransferManager
virtual void ProcessMorePendingTransfers() = 0;
virtual bool NeedsProcessMorePendingTransfers() = 0;
+ // Wait for all AsyncTex(Sub)Image2D uploads to finish before returning.
+ virtual void WaitAllAsyncTexImage2D() = 0;
+
AsyncPixelTransferDelegate* CreatePixelTransferDelegate(
gles2::TextureRef* ref,
const AsyncTexImage2DParams& define_params);
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
index 32fa4c0..2bbe76b 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.cc
@@ -729,6 +729,16 @@ bool AsyncPixelTransferManagerEGL::NeedsProcessMorePendingTransfers() {
return false;
}
+void AsyncPixelTransferManagerEGL::WaitAllAsyncTexImage2D() {
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateEGL* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
AsyncPixelTransferDelegate*
AsyncPixelTransferManagerEGL::CreatePixelTransferDelegateImpl(
gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
index 4c273fe..8f0c4b3 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_egl.h
@@ -27,6 +27,7 @@ class AsyncPixelTransferManagerEGL : public AsyncPixelTransferManager {
virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
virtual void ProcessMorePendingTransfers() OVERRIDE;
virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
// State shared between Managers and Delegates.
struct SharedState {
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
index 58748dd..40ec87f 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.cc
@@ -89,6 +89,7 @@ void AsyncPixelTransferDelegateIdle::AsyncTexImage2D(
shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
id_,
+ this,
base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexImage2D,
AsWeakPtr(),
tex_params,
@@ -106,6 +107,7 @@ void AsyncPixelTransferDelegateIdle::AsyncTexSubImage2D(
shared_state_->tasks.push_back(AsyncPixelTransferManagerIdle::Task(
id_,
+ this,
base::Bind(&AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D,
AsWeakPtr(),
tex_params,
@@ -224,8 +226,11 @@ void AsyncPixelTransferDelegateIdle::PerformAsyncTexSubImage2D(
}
AsyncPixelTransferManagerIdle::Task::Task(
- uint64 transfer_id, const base::Closure& task)
+ uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task)
: transfer_id(transfer_id),
+ delegate(delegate),
task(task) {
}
@@ -267,6 +272,7 @@ void AsyncPixelTransferManagerIdle::AsyncNotifyCompletion(
shared_state_.tasks.push_back(
Task(0, // 0 transfer_id for notification tasks.
+ NULL,
base::Bind(
&PerformNotifyCompletion,
mem_params,
@@ -297,6 +303,15 @@ bool AsyncPixelTransferManagerIdle::NeedsProcessMorePendingTransfers() {
return !shared_state_.tasks.empty();
}
+void AsyncPixelTransferManagerIdle::WaitAllAsyncTexImage2D() {
+ if (shared_state_.tasks.empty())
+ return;
+
+ const Task& task = shared_state_.tasks.back();
+ if (task.delegate)
+ task.delegate->WaitForTransferCompletion();
+}
+
AsyncPixelTransferDelegate*
AsyncPixelTransferManagerIdle::CreatePixelTransferDelegateImpl(
gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
index e7f990e..af3262f 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_idle.h
@@ -25,14 +25,19 @@ class AsyncPixelTransferManagerIdle : public AsyncPixelTransferManager {
virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
virtual void ProcessMorePendingTransfers() OVERRIDE;
virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
struct Task {
- Task(uint64 transfer_id, const base::Closure& task);
+ Task(uint64 transfer_id,
+ AsyncPixelTransferDelegate* delegate,
+ const base::Closure& task);
~Task();
// This is non-zero if pixel transfer task.
uint64 transfer_id;
+ AsyncPixelTransferDelegate* delegate;
+
base::Closure task;
};
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
index adc2a6f..3bc8b6b 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_mock.h
@@ -24,6 +24,7 @@ class MockAsyncPixelTransferManager : public AsyncPixelTransferManager {
MOCK_METHOD0(GetTotalTextureUploadTime, base::TimeDelta());
MOCK_METHOD0(ProcessMorePendingTransfers, void());
MOCK_METHOD0(NeedsProcessMorePendingTransfers, bool());
+ MOCK_METHOD0(WaitAllAsyncTexImage2D, void());
MOCK_METHOD2(
CreatePixelTransferDelegateImpl,
AsyncPixelTransferDelegate*(gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
index e670dc7..99103b8 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.cc
@@ -534,6 +534,16 @@ bool AsyncPixelTransferManagerShareGroup::NeedsProcessMorePendingTransfers() {
return false;
}
+void AsyncPixelTransferManagerShareGroup::WaitAllAsyncTexImage2D() {
+ if (shared_state_.pending_allocations.empty())
+ return;
+
+ AsyncPixelTransferDelegateShareGroup* delegate =
+ shared_state_.pending_allocations.back().get();
+ if (delegate)
+ delegate->WaitForTransferCompletion();
+}
+
AsyncPixelTransferDelegate*
AsyncPixelTransferManagerShareGroup::CreatePixelTransferDelegateImpl(
gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
index 173b532..64daffe 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_share_group.h
@@ -31,6 +31,7 @@ class AsyncPixelTransferManagerShareGroup : public AsyncPixelTransferManager {
virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
virtual void ProcessMorePendingTransfers() OVERRIDE;
virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
// State shared between Managers and Delegates.
struct SharedState {
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
index ccd5d3d..d5f96b0 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.cc
@@ -78,6 +78,9 @@ bool AsyncPixelTransferManagerStub::NeedsProcessMorePendingTransfers() {
return false;
}
+void AsyncPixelTransferManagerStub::WaitAllAsyncTexImage2D() {
+}
+
AsyncPixelTransferDelegate*
AsyncPixelTransferManagerStub::CreatePixelTransferDelegateImpl(
gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
index 0f4e4ba..a93ce94 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_stub.h
@@ -23,6 +23,7 @@ class AsyncPixelTransferManagerStub : public AsyncPixelTransferManager {
virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
virtual void ProcessMorePendingTransfers() OVERRIDE;
virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
private:
// AsyncPixelTransferManager implementation:
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
index 69e3c34..cd7d087 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.cc
@@ -128,6 +128,9 @@ bool AsyncPixelTransferManagerSync::NeedsProcessMorePendingTransfers() {
return false;
}
+void AsyncPixelTransferManagerSync::WaitAllAsyncTexImage2D() {
+}
+
AsyncPixelTransferDelegate*
AsyncPixelTransferManagerSync::CreatePixelTransferDelegateImpl(
gles2::TextureRef* ref,
diff --git a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
index cb62062..7d0b8b6 100644
--- a/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
+++ b/gpu/command_buffer/service/async_pixel_transfer_manager_sync.h
@@ -23,6 +23,7 @@ class AsyncPixelTransferManagerSync : public AsyncPixelTransferManager {
virtual base::TimeDelta GetTotalTextureUploadTime() OVERRIDE;
virtual void ProcessMorePendingTransfers() OVERRIDE;
virtual bool NeedsProcessMorePendingTransfers() OVERRIDE;
+ virtual void WaitAllAsyncTexImage2D() OVERRIDE;
// State shared between Managers and Delegates.
struct SharedState {
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 9ca8213..c3bdae5 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -15,10 +15,12 @@
#include "base/at_exit.h"
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/memory/scoped_ptr.h"
+#include "base/numerics/safe_math.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "build/build_config.h"
@@ -518,6 +520,29 @@ struct FenceCallback {
scoped_ptr<gfx::GLFence> fence;
};
+class AsyncUploadTokenCompletionObserver
+ : public AsyncPixelTransferCompletionObserver {
+ public:
+ explicit AsyncUploadTokenCompletionObserver(uint32 async_upload_token)
+ : async_upload_token_(async_upload_token) {
+ }
+
+ virtual void DidComplete(const AsyncMemoryParams& mem_params) OVERRIDE {
+ DCHECK(mem_params.buffer());
+ void* data = mem_params.GetDataAddress();
+ AsyncUploadSync* sync = static_cast<AsyncUploadSync*>(data);
+ sync->SetAsyncUploadToken(async_upload_token_);
+ }
+
+ private:
+ virtual ~AsyncUploadTokenCompletionObserver() {
+ }
+
+ uint32 async_upload_token_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncUploadTokenCompletionObserver);
+};
+
// } // anonymous namespace.
bool GLES2Decoder::GetServiceTextureId(uint32 client_texture_id,
@@ -713,6 +738,13 @@ class GLES2DecoderImpl : public GLES2Decoder,
bool GenVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
void DeleteVertexArraysOESHelper(GLsizei n, const GLuint* client_ids);
+ // Helper for async upload token completion notification callback.
+ base::Closure AsyncUploadTokenCompletionClosure(uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset);
+
+
+
// Workarounds
void OnFboChanged() const;
void OnUseFramebuffer() const;
@@ -10355,6 +10387,29 @@ bool GLES2DecoderImpl::ValidateAsyncTransfer(
return true;
}
+base::Closure GLES2DecoderImpl::AsyncUploadTokenCompletionClosure(
+ uint32 async_upload_token,
+ uint32 sync_data_shm_id,
+ uint32 sync_data_shm_offset) {
+ scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_data_shm_id);
+ if (!buffer || !buffer->GetDataAddress(sync_data_shm_offset,
+ sizeof(AsyncUploadSync)))
+ return base::Closure();
+
+ AsyncMemoryParams mem_params(buffer,
+ sync_data_shm_offset,
+ sizeof(AsyncUploadSync));
+
+ scoped_refptr<AsyncUploadTokenCompletionObserver> observer(
+ new AsyncUploadTokenCompletionObserver(async_upload_token));
+
+ return base::Bind(
+ &AsyncPixelTransferManager::AsyncNotifyCompletion,
+ base::Unretained(GetAsyncPixelTransferManager()),
+ mem_params,
+ observer);
+}
+
error::Error GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM(
uint32 immediate_data_size, const cmds::AsyncTexImage2DCHROMIUM& c) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM");
@@ -10371,6 +10426,21 @@ error::Error GLES2DecoderImpl::HandleAsyncTexImage2DCHROMIUM(
uint32 pixels_shm_id = static_cast<uint32>(c.pixels_shm_id);
uint32 pixels_shm_offset = static_cast<uint32>(c.pixels_shm_offset);
uint32 pixels_size;
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
// TODO(epenner): Move this and copies of this memory validation
// into ValidateTexImage2D step.
@@ -10457,6 +10527,21 @@ error::Error GLES2DecoderImpl::HandleAsyncTexSubImage2DCHROMIUM(
GLsizei height = static_cast<GLsizei>(c.height);
GLenum format = static_cast<GLenum>(c.format);
GLenum type = static_cast<GLenum>(c.type);
+ uint32 async_upload_token = static_cast<uint32>(c.async_upload_token);
+ uint32 sync_data_shm_id = static_cast<uint32>(c.sync_data_shm_id);
+ uint32 sync_data_shm_offset = static_cast<uint32>(c.sync_data_shm_offset);
+
+ base::ScopedClosureRunner scoped_completion_callback;
+ if (async_upload_token) {
+ base::Closure completion_closure =
+ AsyncUploadTokenCompletionClosure(async_upload_token,
+ sync_data_shm_id,
+ sync_data_shm_offset);
+ if (completion_closure.is_null())
+ return error::kInvalidArguments;
+
+ scoped_completion_callback.Reset(completion_closure);
+ }
// TODO(epenner): Move this and copies of this memory validation
// into ValidateTexSubImage2D step.
@@ -10559,6 +10644,15 @@ error::Error GLES2DecoderImpl::HandleWaitAsyncTexImage2DCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleWaitAllAsyncTexImage2DCHROMIUM(
+ uint32 immediate_data_size, const cmds::WaitAllAsyncTexImage2DCHROMIUM& c) {
+ TRACE_EVENT0("gpu", "GLES2DecoderImpl::HandleWaitAllAsyncTexImage2DCHROMIUM");
+
+ GetAsyncPixelTransferManager()->WaitAllAsyncTexImage2D();
+ ProcessFinishedAsyncTransfers();
+ return error::kNoError;
+}
+
void GLES2DecoderImpl::OnTextureRefDetachedFromFramebuffer(
TextureRef* texture_ref) {
Texture* texture = texture_ref->texture();
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index d2e6e97..6437648 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -8223,12 +8223,16 @@ TEST_F(GLES2DecoderManualInitTest, AsyncPixelTransfers) {
// Tex(Sub)Image2D upload commands.
AsyncTexImage2DCHROMIUM teximage_cmd;
teximage_cmd.Init(GL_TEXTURE_2D, 0, GL_RGBA, 8, 8, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset);
+ GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset,
+ 0, 0, 0);
AsyncTexSubImage2DCHROMIUM texsubimage_cmd;
texsubimage_cmd.Init(GL_TEXTURE_2D, 0, 0, 0, 8, 8, GL_RGBA,
- GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset);
+ GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset,
+ 0, 0, 0);
WaitAsyncTexImage2DCHROMIUM wait_cmd;
wait_cmd.Init(GL_TEXTURE_2D);
+ WaitAllAsyncTexImage2DCHROMIUM wait_all_cmd;
+ wait_all_cmd.Init();
// No transfer state exists initially.
EXPECT_FALSE(
@@ -8354,13 +8358,15 @@ TEST_F(GLES2DecoderManualInitTest, AsyncPixelTransfers) {
EXPECT_FALSE(
decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
delegate = NULL;
}
// WaitAsyncTexImage2D
{
// Get a fresh texture since the existing texture cannot be respecified
- // asynchronously and AsyncTexSubImage2D does not involved binding.
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
EXPECT_CALL(*gl_, GenTextures(1, _))
.WillOnce(SetArgumentPointee<1>(kServiceTextureId));
DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
@@ -8389,6 +8395,47 @@ TEST_F(GLES2DecoderManualInitTest, AsyncPixelTransfers) {
EXPECT_EQ(error::kNoError, ExecuteCmd(wait_cmd));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
+
+ // WaitAllAsyncTexImage2D
+ EXPECT_CALL(*delegate, Destroy()).RetiresOnSaturation();
+ DoDeleteTexture(client_texture_id_, kServiceTextureId);
+ EXPECT_FALSE(
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+ texture = NULL;
+ texture_ref = NULL;
+ delegate = NULL;
+ {
+ // Get a fresh texture since the existing texture cannot be respecified
+ // asynchronously and AsyncTexSubImage2D does not involve binding.
+ EXPECT_CALL(*gl_, GenTextures(1, _))
+ .WillOnce(SetArgumentPointee<1>(kServiceTextureId));
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ texture_ref = GetTexture(client_texture_id_);
+ texture = texture_ref->texture();
+ texture->SetImmutable(false);
+ // Create transfer state since it doesn't exist.
+ EXPECT_CALL(*manager, CreatePixelTransferDelegateImpl(texture_ref, _))
+ .WillOnce(Return(
+ delegate = new StrictMock<gpu::MockAsyncPixelTransferDelegate>))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*delegate, AsyncTexImage2D(_, _, _))
+ .RetiresOnSaturation();
+ // Start async transfer.
+ EXPECT_EQ(error::kNoError, ExecuteCmd(teximage_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(
+ delegate,
+ decoder_->GetAsyncPixelTransferManager()->GetPixelTransferDelegate(
+ texture_ref));
+
+ EXPECT_TRUE(texture->IsImmutable());
+ // Wait for completion of all uploads.
+ EXPECT_CALL(*manager, WaitAllAsyncTexImage2D()).RetiresOnSaturation();
+ EXPECT_CALL(*manager, BindCompletedAsyncTransfers());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(wait_all_cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
}
TEST_F(GLES2DecoderManualInitTest, AsyncPixelTransferManager) {
@@ -8412,7 +8459,8 @@ TEST_F(GLES2DecoderManualInitTest, AsyncPixelTransferManager) {
AsyncTexImage2DCHROMIUM teximage_cmd;
teximage_cmd.Init(GL_TEXTURE_2D, 0, GL_RGBA, 8, 8, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset);
+ GL_UNSIGNED_BYTE, kSharedMemoryId, kSharedMemoryOffset,
+ 0, 0, 0);
// No transfer delegate exists initially.
EXPECT_FALSE(
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index 37d907f..f7e943e 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -54,6 +54,8 @@
// TODO(gman): WaitAsyncTexImage2DCHROMIUM
+// TODO(gman): WaitAllAsyncTexImage2DCHROMIUM
+
// TODO(gman): DiscardFramebufferEXT
// TODO(gman): DiscardFramebufferEXTImmediate
// TODO(gman): LoseContextCHROMIUM