author    owenlin@chromium.org <owenlin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-08-21 14:22:40 +0000
committer owenlin@chromium.org <owenlin@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-08-21 14:24:25 +0000
commit    569fda4ef1c60773d2c2400f16ffab0601c9dafb (patch)
tree      0f48aaf8c1214bc33e8ad4d0f06bd22e4a1f072b /content/common/gpu/media
parent    054e530f8e29db0a3be5724a40934a902910cd40 (diff)
rendering_helper - Refactoring - remove the Client interface.
Use a callback to return the released picture from rendering_helper, so we no longer need the RenderingHelper::Client interface; this simplifies the code.

BUG=None
TEST=Run the vda_unittest on lumpy and an ARM CrOS device.

Review URL: https://codereview.chromium.org/462413004

Cr-Commit-Position: refs/heads/master@{#291061}
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@291061 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/common/gpu/media')
-rw-r--r--  content/common/gpu/media/rendering_helper.cc                  | 118
-rw-r--r--  content/common/gpu/media/rendering_helper.h                   | 111
-rw-r--r--  content/common/gpu/media/video_decode_accelerator_unittest.cc | 117
3 files changed, 198 insertions(+), 148 deletions(-)
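
The heart of this refactoring is ownership: a decoded picture is now handed to the renderer as a ref-counted VideoFrameTexture whose destruction runs a "no longer needed" callback, instead of the renderer calling back into a RenderingHelper::Client. The standalone sketch below is not part of the patch; std::function and std::shared_ptr stand in for base::Closure and base::RefCounted so it builds outside the Chromium tree, but it illustrates the same pattern.

// Standalone sketch of the release-callback ownership pattern this commit adopts.
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <utility>

class VideoFrameTexture {
 public:
  VideoFrameTexture(uint32_t texture_target, uint32_t texture_id,
                    std::function<void()> no_longer_needed_cb)
      : texture_target_(texture_target),
        texture_id_(texture_id),
        no_longer_needed_cb_(std::move(no_longer_needed_cb)) {}

  // Mirrors base::ResetAndReturn(&cb).Run() in the patch: fire the callback
  // when the frame is destroyed so the texture's owner can reuse it.
  ~VideoFrameTexture() {
    if (no_longer_needed_cb_) no_longer_needed_cb_();
  }

  uint32_t texture_target() const { return texture_target_; }
  uint32_t texture_id() const { return texture_id_; }

 private:
  uint32_t texture_target_;
  uint32_t texture_id_;
  std::function<void()> no_longer_needed_cb_;
};

int main() {
  // The decoder-side client learns about the release purely through the
  // callback; 0x0DE1 is GL_TEXTURE_2D, 42 is an arbitrary texture id.
  auto frame = std::make_shared<VideoFrameTexture>(
      0x0DE1, 42, [] { std::cout << "picture buffer can be reused\n"; });

  // The renderer may hold an extra reference across a SwapBuffers() call...
  std::shared_ptr<VideoFrameTexture> held_for_swap = frame;
  frame.reset();
  held_for_swap.reset();  // ...and only now does the callback fire.
  return 0;
}

Because the callback fires only when the last reference is dropped, the renderer can keep a frame alive across SwapBuffers() simply by holding an extra reference, which is what RenderContent() does with frames_to_be_returned in the diff below.
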
diff --git a/content/common/gpu/media/rendering_helper.cc b/content/common/gpu/media/rendering_helper.cc
index 7ee9401..ce2f94b 100644
--- a/content/common/gpu/media/rendering_helper.cc
+++ b/content/common/gpu/media/rendering_helper.cc
@@ -9,6 +9,7 @@
#include <vector>
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/mac/scoped_nsautorelease_pool.h"
#include "base/message_loop/message_loop.h"
@@ -60,6 +61,25 @@ RenderingHelperParams::RenderingHelperParams() {}
RenderingHelperParams::~RenderingHelperParams() {}
+VideoFrameTexture::VideoFrameTexture(uint32 texture_target,
+ uint32 texture_id,
+ const base::Closure& no_longer_needed_cb)
+ : texture_target_(texture_target),
+ texture_id_(texture_id),
+ no_longer_needed_cb_(no_longer_needed_cb) {
+ DCHECK(!no_longer_needed_cb_.is_null());
+}
+
+VideoFrameTexture::~VideoFrameTexture() {
+ base::ResetAndReturn(&no_longer_needed_cb_).Run();
+}
+
+RenderingHelper::RenderedVideo::RenderedVideo() : last_frame_rendered(false) {
+}
+
+RenderingHelper::RenderedVideo::~RenderedVideo() {
+}
+
// static
bool RenderingHelper::InitializeOneOff() {
base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
@@ -78,15 +98,15 @@ RenderingHelper::RenderingHelper() {
}
RenderingHelper::~RenderingHelper() {
- CHECK_EQ(clients_.size(), 0U) << "Must call UnInitialize before dtor.";
+ CHECK_EQ(videos_.size(), 0U) << "Must call UnInitialize before dtor.";
Clear();
}
void RenderingHelper::Initialize(const RenderingHelperParams& params,
base::WaitableEvent* done) {
- // Use clients_.size() != 0 as a proxy for the class having already been
+ // Use videos_.size() != 0 as a proxy for the class having already been
// Initialize()'d, and UnInitialize() before continuing.
- if (clients_.size()) {
+ if (videos_.size()) {
base::WaitableEvent done(false, false);
UnInitialize(&done);
done.Wait();
@@ -153,12 +173,12 @@ void RenderingHelper::Initialize(const RenderingHelperParams& params,
NULL, gl_surface_, gfx::PreferIntegratedGpu);
gl_context_->MakeCurrent(gl_surface_);
- clients_ = params.clients;
- CHECK_GT(clients_.size(), 0U);
- LayoutRenderingAreas();
+ CHECK_GT(params.window_sizes.size(), 0U);
+ videos_.resize(params.window_sizes.size());
+ LayoutRenderingAreas(params.window_sizes);
if (render_as_thumbnails_) {
- CHECK_EQ(clients_.size(), 1U);
+ CHECK_EQ(videos_.size(), 1U);
GLint max_texture_size;
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_texture_size);
@@ -370,6 +390,31 @@ void RenderingHelper::RenderThumbnail(uint32 texture_target,
++frame_count_;
}
+void RenderingHelper::QueueVideoFrame(
+ size_t window_id,
+ scoped_refptr<VideoFrameTexture> video_frame) {
+ CHECK_EQ(base::MessageLoop::current(), message_loop_);
+ RenderedVideo* video = &videos_[window_id];
+
+ // Pop the last frame if it has been rendered.
+ if (video->last_frame_rendered) {
+ // When last_frame_rendered is true, we should have only one pending frame.
+ // Since we are going to have a new frame, we can release the pending one.
+ DCHECK(video->pending_frames.size() == 1);
+ video->pending_frames.pop();
+ video->last_frame_rendered = false;
+ }
+
+ video->pending_frames.push(video_frame);
+}
+
+void RenderingHelper::DropPendingFrames(size_t window_id) {
+ CHECK_EQ(base::MessageLoop::current(), message_loop_);
+ RenderedVideo* video = &videos_[window_id];
+ video->pending_frames = std::queue<scoped_refptr<VideoFrameTexture> >();
+ video->last_frame_rendered = false;
+}
+
void RenderingHelper::RenderTexture(uint32 texture_target, uint32 texture_id) {
// The ExternalOES sampler is bound to GL_TEXTURE1 and the Texture2D sampler
// is bound to GL_TEXTURE0.
@@ -385,6 +430,7 @@ void RenderingHelper::RenderTexture(uint32 texture_target, uint32 texture_id) {
}
void RenderingHelper::DeleteTexture(uint32 texture_id) {
+ CHECK_EQ(base::MessageLoop::current(), message_loop_);
glDeleteTextures(1, &texture_id);
CHECK_EQ(static_cast<int>(glGetError()), GL_NO_ERROR);
}
@@ -398,7 +444,7 @@ void* RenderingHelper::GetGLDisplay() {
}
void RenderingHelper::Clear() {
- clients_.clear();
+ videos_.clear();
message_loop_ = NULL;
gl_context_ = NULL;
gl_surface_ = NULL;
@@ -461,16 +507,30 @@ void RenderingHelper::RenderContent() {
CHECK_EQ(base::MessageLoop::current(), message_loop_);
glUniform1i(glGetUniformLocation(program_, "tex_flip"), 1);
+ // Frames that will be returned to the client (via the no_longer_needed_cb)
+ // after this vector falls out of scope at the end of this method. We need
+ // to keep references to them until after the SwapBuffers() call below.
+ std::vector<scoped_refptr<VideoFrameTexture> > frames_to_be_returned;
+
if (render_as_thumbnails_) {
// In render_as_thumbnails_ mode, we render the FBO content on the
// screen instead of the decoded textures.
- GLSetViewPort(render_areas_[0]);
+ GLSetViewPort(videos_[0].render_area);
RenderTexture(GL_TEXTURE_2D, thumbnails_texture_id_);
} else {
- for (size_t i = 0; i < clients_.size(); ++i) {
- if (clients_[i]) {
- GLSetViewPort(render_areas_[i]);
- clients_[i]->RenderContent(this);
+ for (size_t i = 0; i < videos_.size(); ++i) {
+ RenderedVideo* video = &videos_[i];
+ if (video->pending_frames.empty())
+ continue;
+ scoped_refptr<VideoFrameTexture> frame = video->pending_frames.front();
+ GLSetViewPort(video->render_area);
+ RenderTexture(frame->texture_target(), frame->texture_id());
+
+ if (video->pending_frames.size() > 1) {
+ frames_to_be_returned.push_back(video->pending_frames.front());
+ video->pending_frames.pop();
+ } else {
+ video->last_frame_rendered = true;
}
}
}
@@ -492,11 +552,12 @@ static void ScaleAndCalculateOffsets(std::vector<int>* lengths,
}
}
-void RenderingHelper::LayoutRenderingAreas() {
+void RenderingHelper::LayoutRenderingAreas(
+ const std::vector<gfx::Size>& window_sizes) {
// Find the number of columns and rows.
- // The smallest n * n or n * (n + 1) > number of clients.
- size_t cols = sqrt(clients_.size() - 1) + 1;
- size_t rows = (clients_.size() + cols - 1) / cols;
+ // The smallest n * n or n * (n + 1) > number of windows.
+ size_t cols = sqrt(videos_.size() - 1) + 1;
+ size_t rows = (videos_.size() + cols - 1) / cols;
// Find the widths and heights of the grid.
std::vector<int> widths(cols);
@@ -504,31 +565,30 @@ void RenderingHelper::LayoutRenderingAreas() {
std::vector<int> offset_x(cols);
std::vector<int> offset_y(rows);
- for (size_t i = 0; i < clients_.size(); ++i) {
- const gfx::Size& window_size = clients_[i]->GetWindowSize();
- widths[i % cols] = std::max(widths[i % cols], window_size.width());
- heights[i / cols] = std::max(heights[i / cols], window_size.height());
+ for (size_t i = 0; i < window_sizes.size(); ++i) {
+ const gfx::Size& size = window_sizes[i];
+ widths[i % cols] = std::max(widths[i % cols], size.width());
+ heights[i / cols] = std::max(heights[i / cols], size.height());
}
ScaleAndCalculateOffsets(&widths, &offset_x, screen_size_.width());
ScaleAndCalculateOffsets(&heights, &offset_y, screen_size_.height());
// Put each render_area_ in the center of each cell.
- render_areas_.clear();
- for (size_t i = 0; i < clients_.size(); ++i) {
- const gfx::Size& window_size = clients_[i]->GetWindowSize();
+ for (size_t i = 0; i < window_sizes.size(); ++i) {
+ const gfx::Size& size = window_sizes[i];
float scale =
- std::min(static_cast<float>(widths[i % cols]) / window_size.width(),
- static_cast<float>(heights[i / cols]) / window_size.height());
+ std::min(static_cast<float>(widths[i % cols]) / size.width(),
+ static_cast<float>(heights[i / cols]) / size.height());
// Don't scale up the texture.
scale = std::min(1.0f, scale);
- size_t w = scale * window_size.width();
- size_t h = scale * window_size.height();
+ size_t w = scale * size.width();
+ size_t h = scale * size.height();
size_t x = offset_x[i % cols] + (widths[i % cols] - w) / 2;
size_t y = offset_y[i / cols] + (heights[i / cols] - h) / 2;
- render_areas_.push_back(gfx::Rect(x, y, w, h));
+ videos_[i].render_area = gfx::Rect(x, y, w, h);
}
}
} // namespace content
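
QueueVideoFrame() and RenderContent() above implement a small per-window policy: keep the last remaining frame queued even after rendering it, so timer-driven playback still has something to draw if the client falls behind, and release it as soon as a newer frame arrives. A standalone sketch of that policy follows (std:: types replace scoped_refptr and the base types; the release callback is modeled by the frame's destructor).

// Standalone sketch of the pending-frames queue and last_frame_rendered policy.
#include <cassert>
#include <iostream>
#include <memory>
#include <queue>

struct Frame {
  explicit Frame(int id) : id(id) {}
  ~Frame() { std::cout << "frame " << id << " returned to decoder\n"; }
  int id;
};

struct RenderedVideo {
  bool last_frame_rendered = false;
  std::queue<std::shared_ptr<Frame> > pending_frames;
};

void QueueVideoFrame(RenderedVideo* video, std::shared_ptr<Frame> frame) {
  // A newer frame supersedes an already-rendered last frame, so release it
  // (dropping the reference is what runs the release callback in the patch).
  if (video->last_frame_rendered) {
    assert(video->pending_frames.size() == 1);
    video->pending_frames.pop();
    video->last_frame_rendered = false;
  }
  video->pending_frames.push(std::move(frame));
}

void RenderOneTick(RenderedVideo* video) {
  if (video->pending_frames.empty())
    return;
  std::cout << "render frame " << video->pending_frames.front()->id << "\n";
  if (video->pending_frames.size() > 1) {
    video->pending_frames.pop();        // More frames are waiting.
  } else {
    video->last_frame_rendered = true;  // Keep it for possible redisplay.
  }
}

int main() {
  RenderedVideo video;
  QueueVideoFrame(&video, std::make_shared<Frame>(1));
  RenderOneTick(&video);  // Renders frame 1 and keeps it queued.
  RenderOneTick(&video);  // Client is slow: frame 1 is shown again.
  QueueVideoFrame(&video, std::make_shared<Frame>(2));  // Releases frame 1.
  RenderOneTick(&video);  // Renders frame 2.
  return 0;
}
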
diff --git a/content/common/gpu/media/rendering_helper.h b/content/common/gpu/media/rendering_helper.h
index a2dfb1b..6b0013f 100644
--- a/content/common/gpu/media/rendering_helper.h
+++ b/content/common/gpu/media/rendering_helper.h
@@ -6,6 +6,7 @@
#define CONTENT_COMMON_GPU_MEDIA_RENDERING_HELPER_H_
#include <map>
+#include <queue>
#include <vector>
#include "base/basictypes.h"
@@ -24,25 +25,53 @@ class WaitableEvent;
namespace content {
-struct RenderingHelperParams;
+class VideoFrameTexture : public base::RefCounted<VideoFrameTexture> {
+ public:
+ uint32 texture_id() const { return texture_id_; }
+ uint32 texture_target() const { return texture_target_; }
+
+ VideoFrameTexture(uint32 texture_target,
+ uint32 texture_id,
+ const base::Closure& no_longer_needed_cb);
+
+ private:
+ friend class base::RefCounted<VideoFrameTexture>;
+
+ uint32 texture_target_;
+ uint32 texture_id_;
+ base::Closure no_longer_needed_cb_;
+
+ ~VideoFrameTexture();
+};
+
+struct RenderingHelperParams {
+ RenderingHelperParams();
+ ~RenderingHelperParams();
+
+ // The rendering FPS.
+ int rendering_fps;
+
+ // The desired size of each window. We play each stream in its own window
+ // on the screen.
+ std::vector<gfx::Size> window_sizes;
+
+ // The members below are only used for the thumbnail mode where all frames
+ // are rendered in sequence onto one FBO for comparison/verification purposes.
+
+ // Whether the frames are rendered as scaled thumbnails within a
+ // larger FBO that is in turn rendered to the window.
+ bool render_as_thumbnails;
+ // The size of the FBO containing all visible thumbnails.
+ gfx::Size thumbnails_page_size;
+ // The size of each thumbnail within the FBO.
+ gfx::Size thumbnail_size;
+};
// Creates and draws textures used by the video decoder.
// This class is not thread safe and thus all the methods of this class
// (except for ctor/dtor) ensure they're being run on a single thread.
class RenderingHelper {
public:
- // Interface for the content provider of the RenderingHelper.
- class Client {
- public:
- // Callback to tell client to render the content.
- virtual void RenderContent(RenderingHelper* helper) = 0;
-
- // Callback to get the desired window size of the client.
- virtual const gfx::Size& GetWindowSize() = 0;
-
- protected:
- virtual ~Client() {}
- };
RenderingHelper();
~RenderingHelper();
@@ -67,9 +96,12 @@ class RenderingHelper {
// |texture_target|.
void RenderThumbnail(uint32 texture_target, uint32 texture_id);
- // Render |texture_id| to the current view port of the screen using target
- // |texture_target|.
- void RenderTexture(uint32 texture_target, uint32 texture_id);
+ // Queues the |video_frame| for rendering.
+ void QueueVideoFrame(size_t window_id,
+ scoped_refptr<VideoFrameTexture> video_frame);
+
+ // Drops all the pending video frames of the specified window.
+ void DropPendingFrames(size_t window_id);
// Delete |texture_id|.
void DeleteTexture(uint32 texture_id);
@@ -87,11 +119,33 @@ class RenderingHelper {
base::WaitableEvent* done);
private:
+ struct RenderedVideo {
+ // The rect on the screen where the video will be rendered.
+ gfx::Rect render_area;
+
+ // True if the last (and only remaining) frame in pending_frames has
+ // been rendered. We keep the last remaining frame in pending_frames even
+ // after it has been rendered, so that we have something to display if the
+ // client is falling behind on providing us with new frames during
+ // timer-driven playback.
+ bool last_frame_rendered;
+
+ // The video frames pending for rendering.
+ std::queue<scoped_refptr<VideoFrameTexture> > pending_frames;
+
+ RenderedVideo();
+ ~RenderedVideo();
+ };
+
void Clear();
void RenderContent();
- void LayoutRenderingAreas();
+ void LayoutRenderingAreas(const std::vector<gfx::Size>& window_sizes);
+
+ // Render |texture_id| to the current view port of the screen using target
+ // |texture_target|.
+ void RenderTexture(uint32 texture_target, uint32 texture_id);
// Timer to trigger RenderContent() repeatedly.
scoped_ptr<base::RepeatingTimer<RenderingHelper> > render_timer_;
@@ -104,10 +158,7 @@ class RenderingHelper {
gfx::Size screen_size_;
- // The rendering area of each window on the screen.
- std::vector<gfx::Rect> render_areas_;
-
- std::vector<base::WeakPtr<Client> > clients_;
+ std::vector<RenderedVideo> videos_;
bool render_as_thumbnails_;
int frame_count_;
@@ -121,24 +172,6 @@ class RenderingHelper {
DISALLOW_COPY_AND_ASSIGN(RenderingHelper);
};
-struct RenderingHelperParams {
- RenderingHelperParams();
- ~RenderingHelperParams();
-
- // The rendering FPS.
- int rendering_fps;
-
- // The clients who provide the content for rendering.
- std::vector<base::WeakPtr<RenderingHelper::Client> > clients;
-
- // Whether the frames are rendered as scaled thumbnails within a
- // larger FBO that is in turn rendered to the window.
- bool render_as_thumbnails;
- // The size of the FBO containing all visible thumbnails.
- gfx::Size thumbnails_page_size;
- // The size of each thumbnail within the FBO.
- gfx::Size thumbnail_size;
-};
} // namespace content
#endif // CONTENT_COMMON_GPU_MEDIA_RENDERING_HELPER_H_
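
RenderingHelperParams now carries the window sizes up front, and LayoutRenderingAreas() (declared above, implemented in rendering_helper.cc earlier in this diff) tiles them into the smallest n x n or n x (n + 1) grid. Below is a simplified standalone sketch of that layout, using plain structs instead of gfx::Size/gfx::Rect and an even per-cell split where the real code uses ScaleAndCalculateOffsets().

// Standalone, simplified sketch of the grid layout used for the video windows.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>

struct Size { int width, height; };
struct Rect { int x, y, w, h; };

std::vector<Rect> LayoutGrid(const std::vector<Size>& window_sizes,
                             const Size& screen) {
  const size_t n = window_sizes.size();
  assert(n > 0);  // The patch CHECK_GT()s this.
  // Smallest n * n or n * (n + 1) grid that holds all windows, as in the patch.
  const size_t cols = static_cast<size_t>(std::sqrt(n - 1)) + 1;
  const size_t rows = (n + cols - 1) / cols;
  const int cell_w = screen.width / static_cast<int>(cols);
  const int cell_h = screen.height / static_cast<int>(rows);

  std::vector<Rect> areas;
  for (size_t i = 0; i < n; ++i) {
    const Size& s = window_sizes[i];
    float scale = std::min(static_cast<float>(cell_w) / s.width,
                           static_cast<float>(cell_h) / s.height);
    scale = std::min(1.0f, scale);  // Don't scale up the texture.
    const int w = static_cast<int>(scale * s.width);
    const int h = static_cast<int>(scale * s.height);
    // Center each render area in its cell.
    const int x = static_cast<int>(i % cols) * cell_w + (cell_w - w) / 2;
    const int y = static_cast<int>(i / cols) * cell_h + (cell_h - h) / 2;
    areas.push_back({x, y, w, h});
  }
  return areas;
}

int main() {
  std::vector<Size> windows = {{320, 240}, {640, 480}, {1280, 720}};
  for (const Rect& r : LayoutGrid(windows, {1920, 1080}))
    std::printf("%d,%d %dx%d\n", r.x, r.y, r.w, r.h);
  return 0;
}
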
diff --git a/content/common/gpu/media/video_decode_accelerator_unittest.cc b/content/common/gpu/media/video_decode_accelerator_unittest.cc
index 071dced..65cc939 100644
--- a/content/common/gpu/media/video_decode_accelerator_unittest.cc
+++ b/content/common/gpu/media/video_decode_accelerator_unittest.cc
@@ -193,9 +193,10 @@ enum ClientState {
// the TESTs below.
class GLRenderingVDAClient
: public VideoDecodeAccelerator::Client,
- public RenderingHelper::Client,
public base::SupportsWeakPtr<GLRenderingVDAClient> {
public:
+ // |window_id| is the window ID of this client, used to identify its
+ // rendering area in the |rendering_helper|.
// Doesn't take ownership of |rendering_helper| or |note|, which must outlive
// |*this|.
// |num_play_throughs| indicates how many times to play through the video.
@@ -212,7 +213,8 @@ class GLRenderingVDAClient
// will start delaying the call to ReusePictureBuffer() for kReuseDelay.
// |decode_calls_per_second| is the number of VDA::Decode calls per second.
// If |decode_calls_per_second| > 0, |num_in_flight_decodes| must be 1.
- GLRenderingVDAClient(RenderingHelper* rendering_helper,
+ GLRenderingVDAClient(size_t window_id,
+ RenderingHelper* rendering_helper,
ClientStateNotification<ClientState>* note,
const std::string& encoded_data,
int num_in_flight_decodes,
@@ -242,14 +244,8 @@ class GLRenderingVDAClient
virtual void NotifyResetDone() OVERRIDE;
virtual void NotifyError(VideoDecodeAccelerator::Error error) OVERRIDE;
- // RenderingHelper::Client implementation.
- virtual void RenderContent(RenderingHelper*) OVERRIDE;
- virtual const gfx::Size& GetWindowSize() OVERRIDE;
-
void OutputFrameDeliveryTimes(base::File* output);
- void NotifyFrameDropped(int32 picture_buffer_id);
-
// Simple getters for inspecting the state of the Client.
int num_done_bitstream_buffers() { return num_done_bitstream_buffers_; }
int num_skipped_fragments() { return num_skipped_fragments_; }
@@ -285,6 +281,7 @@ class GLRenderingVDAClient
// Request decode of the next fragment in the encoded data.
void DecodeNextFragment();
+ size_t window_id_;
RenderingHelper* rendering_helper_;
gfx::Size frame_size_;
std::string encoded_data_;
@@ -319,13 +316,12 @@ class GLRenderingVDAClient
// The number of VDA::Decode calls per second. This is to simulate webrtc.
int decode_calls_per_second_;
bool render_as_thumbnails_;
- bool pending_picture_updated_;
- std::deque<int32> pending_picture_buffer_ids_;
DISALLOW_IMPLICIT_CONSTRUCTORS(GLRenderingVDAClient);
};
GLRenderingVDAClient::GLRenderingVDAClient(
+ size_t window_id,
RenderingHelper* rendering_helper,
ClientStateNotification<ClientState>* note,
const std::string& encoded_data,
@@ -340,7 +336,8 @@ GLRenderingVDAClient::GLRenderingVDAClient(
int delay_reuse_after_frame_num,
int decode_calls_per_second,
bool render_as_thumbnails)
- : rendering_helper_(rendering_helper),
+ : window_id_(window_id),
+ rendering_helper_(rendering_helper),
frame_size_(frame_width, frame_height),
encoded_data_(encoded_data),
num_in_flight_decodes_(num_in_flight_decodes),
@@ -360,8 +357,7 @@ GLRenderingVDAClient::GLRenderingVDAClient(
suppress_rendering_(suppress_rendering),
delay_reuse_after_frame_num_(delay_reuse_after_frame_num),
decode_calls_per_second_(decode_calls_per_second),
- render_as_thumbnails_(render_as_thumbnails),
- pending_picture_updated_(true) {
+ render_as_thumbnails_(render_as_thumbnails) {
CHECK_GT(num_in_flight_decodes, 0);
CHECK_GT(num_play_throughs, 0);
// |num_in_flight_decodes_| is unsupported if |decode_calls_per_second_| > 0.
@@ -459,42 +455,6 @@ void GLRenderingVDAClient::DismissPictureBuffer(int32 picture_buffer_id) {
picture_buffers_by_id_.erase(it);
}
-void GLRenderingVDAClient::RenderContent(RenderingHelper*) {
- CHECK(!render_as_thumbnails_);
-
- // No decoded texture for rendering yet, just skip.
- if (pending_picture_buffer_ids_.size() == 0)
- return;
-
- int32 buffer_id = pending_picture_buffer_ids_.front();
- media::PictureBuffer* picture_buffer = picture_buffers_by_id_[buffer_id];
-
- CHECK(picture_buffer);
- if (!pending_picture_updated_) {
- // Frame dropped, just redraw the last texture.
- rendering_helper_->RenderTexture(texture_target_,
- picture_buffer->texture_id());
- return;
- }
-
- base::TimeTicks now = base::TimeTicks::Now();
- frame_delivery_times_.push_back(now);
-
- rendering_helper_->RenderTexture(texture_target_,
- picture_buffer->texture_id());
-
- if (pending_picture_buffer_ids_.size() == 1) {
- pending_picture_updated_ = false;
- } else {
- pending_picture_buffer_ids_.pop_front();
- ReturnPicture(buffer_id);
- }
-}
-
-const gfx::Size& GLRenderingVDAClient::GetWindowSize() {
- return render_as_thumbnails_ ? kThumbnailsPageSize : frame_size_;
-}
-
void GLRenderingVDAClient::PictureReady(const media::Picture& picture) {
// We shouldn't be getting pictures delivered after Reset has completed.
CHECK_LT(state_, CS_RESET);
@@ -503,6 +463,9 @@ void GLRenderingVDAClient::PictureReady(const media::Picture& picture) {
return;
base::TimeTicks now = base::TimeTicks::Now();
+
+ frame_delivery_times_.push_back(now);
+
// Save the decode time of this picture.
std::map<int, base::TimeTicks>::iterator it =
decode_start_time_.find(picture.bitstream_buffer_id());
@@ -524,25 +487,22 @@ void GLRenderingVDAClient::PictureReady(const media::Picture& picture) {
encoded_data_next_pos_to_decode_ = 0;
}
+ media::PictureBuffer* picture_buffer =
+ picture_buffers_by_id_[picture.picture_buffer_id()];
+ CHECK(picture_buffer);
+
+ scoped_refptr<VideoFrameTexture> video_frame =
+ new VideoFrameTexture(texture_target_,
+ picture_buffer->texture_id(),
+ base::Bind(&GLRenderingVDAClient::ReturnPicture,
+ AsWeakPtr(),
+ picture.picture_buffer_id()));
+
if (render_as_thumbnails_) {
- frame_delivery_times_.push_back(now);
- media::PictureBuffer* picture_buffer =
- picture_buffers_by_id_[picture.picture_buffer_id()];
- CHECK(picture_buffer);
- rendering_helper_->RenderThumbnail(texture_target_,
- picture_buffer->texture_id());
- ReturnPicture(picture.picture_buffer_id());
+ rendering_helper_->RenderThumbnail(video_frame->texture_target(),
+ video_frame->texture_id());
} else if (!suppress_rendering_) {
- // Keep the picture for rendering.
- pending_picture_buffer_ids_.push_back(picture.picture_buffer_id());
- if (pending_picture_buffer_ids_.size() > 1 && !pending_picture_updated_) {
- ReturnPicture(pending_picture_buffer_ids_.front());
- pending_picture_buffer_ids_.pop_front();
- pending_picture_updated_ = true;
- }
- } else {
- frame_delivery_times_.push_back(now);
- ReturnPicture(picture.picture_buffer_id());
+ rendering_helper_->QueueVideoFrame(window_id_, video_frame);
}
}
@@ -590,12 +550,7 @@ void GLRenderingVDAClient::NotifyResetDone() {
if (decoder_deleted())
return;
- // Clear pending_pictures and reuse them.
- while (!pending_picture_buffer_ids_.empty()) {
- decoder_->ReusePictureBuffer(pending_picture_buffer_ids_.front());
- pending_picture_buffer_ids_.pop_front();
- }
- pending_picture_updated_ = true;
+ rendering_helper_->DropPendingFrames(window_id_);
if (reset_after_frame_num_ == MID_STREAM_RESET) {
reset_after_frame_num_ = END_OF_STREAM_RESET;
@@ -637,10 +592,6 @@ void GLRenderingVDAClient::OutputFrameDeliveryTimes(base::File* output) {
}
}
-void GLRenderingVDAClient::NotifyFrameDropped(int32 picture_buffer_id) {
- decoder_->ReusePictureBuffer(picture_buffer_id);
-}
-
static bool LookingAtNAL(const std::string& encoded, size_t pos) {
return encoded[pos] == 0 && encoded[pos + 1] == 0 &&
encoded[pos + 2] == 0 && encoded[pos + 3] == 1;
@@ -1127,7 +1078,8 @@ TEST_P(VideoDecodeAcceleratorParamTest, TestSimpleDecode) {
}
GLRenderingVDAClient* client =
- new GLRenderingVDAClient(&rendering_helper_,
+ new GLRenderingVDAClient(index,
+ &rendering_helper_,
note,
video_file->data_str,
num_in_flight_decodes,
@@ -1143,7 +1095,10 @@ TEST_P(VideoDecodeAcceleratorParamTest, TestSimpleDecode) {
render_as_thumbnails);
clients[index] = client;
- helper_params.clients.push_back(client->AsWeakPtr());
+ helper_params.window_sizes.push_back(
+ render_as_thumbnails
+ ? kThumbnailsPageSize
+ : gfx::Size(video_file->width, video_file->height));
}
InitializeRenderingHelper(helper_params);
@@ -1376,7 +1331,8 @@ TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
ClientStateNotification<ClientState>* note =
new ClientStateNotification<ClientState>();
GLRenderingVDAClient* client =
- new GLRenderingVDAClient(&rendering_helper_,
+ new GLRenderingVDAClient(0,
+ &rendering_helper_,
note,
test_video_files_[0]->data_str,
1,
@@ -1390,7 +1346,8 @@ TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
std::numeric_limits<int>::max(),
kWebRtcDecodeCallsPerSecond,
false /* render_as_thumbnail */);
- helper_params.clients.push_back(client->AsWeakPtr());
+ helper_params.window_sizes.push_back(
+ gfx::Size(test_video_files_[0]->width, test_video_files_[0]->height));
InitializeRenderingHelper(helper_params);
CreateAndStartDecoder(client, note);
WaitUntilDecodeFinish(note);
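
Taken together, the unittest changes show the new client-side contract: a client announces its window size via RenderingHelperParams::window_sizes, queues one VideoFrameTexture per decoded picture under its window_id, and gets picture buffers back solely through each frame's release callback; DropPendingFrames() on reset just drops the remaining references. A mocked, standalone sketch of that flow (the types below are illustrative stand-ins, not the actual test harness):

// Standalone sketch of the refactored client/renderer wiring.
#include <cstdio>
#include <functional>
#include <memory>
#include <queue>
#include <vector>

struct VideoFrame {
  explicit VideoFrame(std::function<void()> release_cb)
      : release_cb(std::move(release_cb)) {}
  ~VideoFrame() { if (release_cb) release_cb(); }
  std::function<void()> release_cb;
};

// Minimal stand-in for the refactored RenderingHelper API.
struct FakeRenderingHelper {
  std::vector<std::queue<std::shared_ptr<VideoFrame>>> windows;
  void Initialize(size_t num_windows) { windows.resize(num_windows); }
  void QueueVideoFrame(size_t window_id, std::shared_ptr<VideoFrame> f) {
    windows[window_id].push(std::move(f));
  }
  void DropPendingFrames(size_t window_id) {
    // Dropping the references is what releases the pictures.
    std::queue<std::shared_ptr<VideoFrame>> empty;
    windows[window_id].swap(empty);
  }
};

struct FakeClient {
  size_t window_id;
  FakeRenderingHelper* helper;
  void PictureReady(int picture_buffer_id) {
    // Wrap the decoded picture; its release callback returns the buffer.
    helper->QueueVideoFrame(
        window_id,
        std::make_shared<VideoFrame>([picture_buffer_id] {
          std::printf("ReusePictureBuffer(%d)\n", picture_buffer_id);
        }));
  }
  void NotifyResetDone() { helper->DropPendingFrames(window_id); }
};

int main() {
  FakeRenderingHelper helper;
  helper.Initialize(/*num_windows=*/1);  // Analogue of params.window_sizes.
  FakeClient client{/*window_id=*/0, &helper};
  client.PictureReady(3);
  client.PictureReady(7);
  client.NotifyResetDone();  // Prints ReusePictureBuffer for 3 and 7.
  return 0;
}
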