-rw-r--r-- | content/renderer/media/rtc_video_decoder.cc        |  45
-rw-r--r-- | media/base/pts_stream_unittest.cc                  |   2
-rw-r--r-- | media/base/video_frame.cc                          | 187
-rw-r--r-- | media/base/video_frame.h                           |  53
-rw-r--r-- | media/base/video_frame_unittest.cc                 |  35
-rw-r--r-- | media/filters/ffmpeg_video_decoder.cc              |   8
-rw-r--r-- | media/filters/ffmpeg_video_decoder_unittest.cc     |   5
-rw-r--r-- | media/filters/video_renderer_base_unittest.cc      |   9
-rw-r--r-- | media/tools/scaler_bench/scaler_bench.cc           |  26
-rw-r--r-- | media/tools/shader_bench/cpu_color_painter.cc      |  13
-rw-r--r-- | media/tools/shader_bench/shader_bench.cc           |  14
-rw-r--r-- | media/video/ffmpeg_video_decode_engine.cc          |  21
-rw-r--r-- | media/video/ffmpeg_video_decode_engine_unittest.cc |  11
-rw-r--r-- | remoting/base/codec_test.cc                        |   8
-rw-r--r-- | remoting/client/plugin/pepper_view.cc              |  12
15 files changed, 185 insertions(+), 264 deletions(-)
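The pattern applied throughout this change is mechanical: each `VideoFrame::Create*` factory loses its `scoped_refptr<VideoFrame>* frame_out` out-parameter and returns the `scoped_refptr<VideoFrame>` directly, so call sites collapse a declare-then-fill pair into a single initialization and can pass results straight into expressions such as `VideoFrameReady(...)`. A minimal standalone sketch of the two calling conventions, using `std::shared_ptr` and a hypothetical `Frame` struct as stand-ins for `scoped_refptr` and `media::VideoFrame` (the real classes live in Chromium's `media/` tree):

    #include <memory>

    // Hypothetical stand-in for media::VideoFrame; illustrative only.
    struct Frame {
      int width;
      int height;
    };

    // Old convention: the factory fills an out-parameter.
    void CreateFrameOld(int width, int height,
                        std::shared_ptr<Frame>* frame_out) {
      *frame_out = std::make_shared<Frame>(Frame{width, height});
    }

    // New convention: the factory returns the ref-counted pointer directly.
    std::shared_ptr<Frame> CreateFrameNew(int width, int height) {
      return std::make_shared<Frame>(Frame{width, height});
    }

    int main() {
      // Before: declare, then pass the address to be filled.
      std::shared_ptr<Frame> a;
      CreateFrameOld(320, 240, &a);

      // After: one initialization; the result can also be used inline.
      std::shared_ptr<Frame> b = CreateFrameNew(320, 240);
      return (a->width == b->width) ? 0 : 1;
    }

The return-value style also removes the half-checked failure paths: the old `alloc_worked`/`buffer_allocated` plumbing goes away because allocation failure is no longer reported through a null out-parameter.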
diff --git a/content/renderer/media/rtc_video_decoder.cc b/content/renderer/media/rtc_video_decoder.cc
index d9fbac7..6a92775 100644
--- a/content/renderer/media/rtc_video_decoder.cc
+++ b/content/renderer/media/rtc_video_decoder.cc
@@ -131,37 +131,7 @@ void RTCVideoDecoder::Seek(base::TimeDelta time, const FilterStatusCB& cb) {
   // Create output buffer pool and pass the frames to renderer
   // so that the renderer can complete the seeking
   for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
-    scoped_refptr<VideoFrame> video_frame;
-    VideoFrame::CreateFrame(VideoFrame::YV12,
-                            width_,
-                            height_,
-                            kNoTimestamp,
-                            kNoTimestamp,
-                            &video_frame);
-    if (!video_frame.get()) {
-      break;
-    }
-
-    // Create black frame
-    const uint8 kBlackY = 0x00;
-    const uint8 kBlackUV = 0x80;
-    // Fill the Y plane.
-    uint8* y_plane = video_frame->data(VideoFrame::kYPlane);
-    for (size_t i = 0; i < height_; ++i) {
-      memset(y_plane, kBlackY, width_);
-      y_plane += video_frame->stride(VideoFrame::kYPlane);
-    }
-    // Fill the U and V planes.
-    uint8* u_plane = video_frame->data(VideoFrame::kUPlane);
-    uint8* v_plane = video_frame->data(VideoFrame::kVPlane);
-    for (size_t i = 0; i < (height_ / 2); ++i) {
-      memset(u_plane, kBlackUV, width_ / 2);
-      memset(v_plane, kBlackUV, width_ / 2);
-      u_plane += video_frame->stride(VideoFrame::kUPlane);
-      v_plane += video_frame->stride(VideoFrame::kVPlane);
-    }
-
-    VideoFrameReady(video_frame);
+    VideoFrameReady(VideoFrame::CreateBlackFrame(width_, height_));
   }
 
   state_ = kNormal;
@@ -220,19 +190,18 @@ bool RTCVideoDecoder::RenderFrame(const cricket::VideoFrame* frame) {
 
   // Check if there's a size change
   if (video_frame->width() != width_ || video_frame->height() != height_) {
-    video_frame.release();
     // Allocate new buffer based on the new size
-    VideoFrame::CreateFrame(VideoFrame::YV12,
-                            width_,
-                            height_,
-                            kNoTimestamp,
-                            kNoTimestamp,
-                            &video_frame);
+    video_frame = VideoFrame::CreateFrame(VideoFrame::YV12,
+                                          width_,
+                                          height_,
+                                          kNoTimestamp,
+                                          kNoTimestamp);
   }
 
   video_frame->SetTimestamp(host()->GetTime());
   video_frame->SetDuration(base::TimeDelta::FromMilliseconds(30));
 
+  // TODO(scherkus): deduplicate YUV copying code.
   uint8* y_plane = video_frame->data(VideoFrame::kYPlane);
   const uint8* y_plane_src = frame->GetYPlane();
   for (size_t row = 0; row < video_frame->height(); ++row) {
diff --git a/media/base/pts_stream_unittest.cc b/media/base/pts_stream_unittest.cc
index abab227..22f3807 100644
--- a/media/base/pts_stream_unittest.cc
+++ b/media/base/pts_stream_unittest.cc
@@ -11,7 +11,7 @@ namespace media {
 class PtsStreamTest : public testing::Test {
  public:
   PtsStreamTest() {
-    VideoFrame::CreateBlackFrame(16, 16, &video_frame_);
+    video_frame_ = VideoFrame::CreateBlackFrame(16, 16);
 
     // Use typical frame rate of 25 fps.
     base::TimeDelta frame_duration = base::TimeDelta::FromMicroseconds(40000);
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index 332c3c5..c78b5a4 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -29,117 +29,103 @@ size_t VideoFrame::GetNumberOfPlanes(VideoFrame::Format format) {
 }
 
 // static
-void VideoFrame::CreateFrame(VideoFrame::Format format,
-                             size_t width,
-                             size_t height,
-                             base::TimeDelta timestamp,
-                             base::TimeDelta duration,
-                             scoped_refptr<VideoFrame>* frame_out) {
+scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
+    VideoFrame::Format format,
+    size_t width,
+    size_t height,
+    base::TimeDelta timestamp,
+    base::TimeDelta duration) {
   DCHECK(width > 0 && height > 0);
   DCHECK(width * height < 100000000);
-  DCHECK(frame_out);
-  bool alloc_worked = false;
   scoped_refptr<VideoFrame> frame(
       new VideoFrame(VideoFrame::TYPE_SYSTEM_MEMORY, format, width, height));
-  if (frame) {
-    frame->SetTimestamp(timestamp);
-    frame->SetDuration(duration);
-    switch (format) {
-      case VideoFrame::RGB555:
-      case VideoFrame::RGB565:
-        alloc_worked = frame->AllocateRGB(2u);
-        break;
-      case VideoFrame::RGB24:
-        alloc_worked = frame->AllocateRGB(3u);
-        break;
-      case VideoFrame::RGB32:
-      case VideoFrame::RGBA:
-        alloc_worked = frame->AllocateRGB(4u);
-        break;
-      case VideoFrame::YV12:
-      case VideoFrame::YV16:
-        alloc_worked = frame->AllocateYUV();
-        break;
-      case VideoFrame::ASCII:
-        alloc_worked = frame->AllocateRGB(1u);
-        break;
-      default:
-        NOTREACHED();
-        alloc_worked = false;
-        break;
-    }
+  frame->SetTimestamp(timestamp);
+  frame->SetDuration(duration);
+  switch (format) {
+    case VideoFrame::RGB555:
+    case VideoFrame::RGB565:
+      frame->AllocateRGB(2u);
+      break;
+    case VideoFrame::RGB24:
+      frame->AllocateRGB(3u);
+      break;
+    case VideoFrame::RGB32:
+    case VideoFrame::RGBA:
+      frame->AllocateRGB(4u);
+      break;
+    case VideoFrame::YV12:
+    case VideoFrame::YV16:
+      frame->AllocateYUV();
+      break;
+    case VideoFrame::ASCII:
+      frame->AllocateRGB(1u);
+      break;
+    default:
+      NOTREACHED();
+      return NULL;
   }
-  *frame_out = alloc_worked ? frame : NULL;
+  return frame;
 }
 
 // static
-void VideoFrame::CreateFrameExternal(SurfaceType type,
-                                     Format format,
-                                     size_t width,
-                                     size_t height,
-                                     size_t planes,
-                                     uint8* const data[kMaxPlanes],
-                                     const int32 strides[kMaxPlanes],
-                                     base::TimeDelta timestamp,
-                                     base::TimeDelta duration,
-                                     void* private_buffer,
-                                     scoped_refptr<VideoFrame>* frame_out) {
-  DCHECK(frame_out);
+scoped_refptr<VideoFrame> VideoFrame::CreateFrameExternal(
+    SurfaceType type,
+    Format format,
+    size_t width,
+    size_t height,
+    size_t planes,
+    uint8* const data[kMaxPlanes],
+    const int32 strides[kMaxPlanes],
+    base::TimeDelta timestamp,
+    base::TimeDelta duration,
+    void* private_buffer) {
   scoped_refptr<VideoFrame> frame(
       new VideoFrame(type, format, width, height));
-  if (frame) {
-    frame->SetTimestamp(timestamp);
-    frame->SetDuration(duration);
-    frame->external_memory_ = true;
-    frame->planes_ = planes;
-    frame->private_buffer_ = private_buffer;
-    for (size_t i = 0; i < kMaxPlanes; ++i) {
-      frame->data_[i] = data[i];
-      frame->strides_[i] = strides[i];
-    }
+  frame->SetTimestamp(timestamp);
+  frame->SetDuration(duration);
+  frame->external_memory_ = true;
+  frame->planes_ = planes;
+  frame->private_buffer_ = private_buffer;
+  for (size_t i = 0; i < kMaxPlanes; ++i) {
+    frame->data_[i] = data[i];
+    frame->strides_[i] = strides[i];
   }
-  *frame_out = frame;
+  return frame;
 }
 
 // static
-void VideoFrame::CreateFrameGlTexture(Format format,
-                                      size_t width,
-                                      size_t height,
-                                      GlTexture const textures[kMaxPlanes],
-                                      scoped_refptr<VideoFrame>* frame_out) {
-  DCHECK(frame_out);
+scoped_refptr<VideoFrame> VideoFrame::CreateFrameGlTexture(
+    Format format,
+    size_t width,
+    size_t height,
+    GlTexture const textures[kMaxPlanes]) {
  scoped_refptr<VideoFrame> frame(
       new VideoFrame(TYPE_GL_TEXTURE, format, width, height));
-  if (frame) {
-    frame->external_memory_ = true;
-    frame->planes_ = GetNumberOfPlanes(format);
-    for (size_t i = 0; i < kMaxPlanes; ++i) {
-      frame->gl_textures_[i] = textures[i];
-      // TODO(hclam): Fix me for color format other than RGBA.
-      frame->strides_[i] = width;
-    }
+  frame->external_memory_ = true;
+  frame->planes_ = GetNumberOfPlanes(format);
+  for (size_t i = 0; i < kMaxPlanes; ++i) {
+    frame->gl_textures_[i] = textures[i];
+    // TODO(hclam): Fix me for color format other than RGBA.
+    frame->strides_[i] = width;
   }
-  *frame_out = frame;
+  return frame;
 }
 
 // static
-void VideoFrame::CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out) {
-  *frame_out = new VideoFrame(VideoFrame::TYPE_SYSTEM_MEMORY,
-                              VideoFrame::EMPTY, 0, 0);
+scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
+  return new VideoFrame(VideoFrame::TYPE_SYSTEM_MEMORY,
+                        VideoFrame::EMPTY, 0, 0);
 }
 
 // static
-void VideoFrame::CreateBlackFrame(int width, int height,
-                                  scoped_refptr<VideoFrame>* frame_out) {
+scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(int width, int height) {
   DCHECK_GT(width, 0);
   DCHECK_GT(height, 0);
 
   // Create our frame.
-  scoped_refptr<VideoFrame> frame;
   const base::TimeDelta kZero;
-  VideoFrame::CreateFrame(VideoFrame::YV12, width, height, kZero, kZero,
-                          &frame);
-  DCHECK(frame);
+  scoped_refptr<VideoFrame> frame =
+      VideoFrame::CreateFrame(VideoFrame::YV12, width, height, kZero, kZero);
 
   // Now set the data to YUV(0,128,128).
   const uint8 kBlackY = 0x00;
@@ -162,8 +148,7 @@ void VideoFrame::CreateBlackFrame(int width, int height,
     v_plane += frame->stride(VideoFrame::kVPlane);
   }
 
-  // Success!
-  *frame_out = frame;
+  return frame;
 }
 
 static inline size_t RoundUp(size_t value, size_t alignment) {
@@ -172,24 +157,21 @@ static inline size_t RoundUp(size_t value, size_t alignment) {
   return ((value + (alignment - 1)) & ~(alignment-1));
 }
 
-bool VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
+void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
   // Round up to align at a 64-bit (8 byte) boundary for each row. This
   // is sufficient for MMX reads (movq).
   size_t bytes_per_row = RoundUp(width_ * bytes_per_pixel, 8);
   planes_ = VideoFrame::kNumRGBPlanes;
   strides_[VideoFrame::kRGBPlane] = bytes_per_row;
   data_[VideoFrame::kRGBPlane] = new uint8[bytes_per_row * height_];
-  DCHECK(data_[VideoFrame::kRGBPlane]);
   DCHECK(!(reinterpret_cast<intptr_t>(data_[VideoFrame::kRGBPlane]) & 7));
   COMPILE_ASSERT(0 == VideoFrame::kRGBPlane, RGB_data_must_be_index_0);
-  return (NULL != data_[VideoFrame::kRGBPlane]);
 }
 
-static const int kFramePadBytes = 15;  // allows faster SIMD YUV convert
+static const int kFramePadBytes = 15;  // Allows faster SIMD YUV convert.
 
-bool VideoFrame::AllocateYUV() {
-  DCHECK(format_ == VideoFrame::YV12 ||
-         format_ == VideoFrame::YV16);
+void VideoFrame::AllocateYUV() {
+  DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16);
   // Align Y rows at 32-bit (4 byte) boundaries. The stride for both YV12 and
   // YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for U and V
   // applies to two rows of Y (one byte of UV for 4 bytes of Y), so in the
@@ -208,19 +190,14 @@ bool VideoFrame::AllocateYUV() {
     uv_bytes /= 2;
   }
   uint8* data = new uint8[y_bytes + (uv_bytes * 2) + kFramePadBytes];
-  if (data) {
-    planes_ = VideoFrame::kNumYUVPlanes;
-    COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
-    data_[VideoFrame::kYPlane] = data;
-    data_[VideoFrame::kUPlane] = data + y_bytes;
-    data_[VideoFrame::kVPlane] = data + y_bytes + uv_bytes;
-    strides_[VideoFrame::kYPlane] = y_bytes_per_row;
-    strides_[VideoFrame::kUPlane] = uv_stride;
-    strides_[VideoFrame::kVPlane] = uv_stride;
-    return true;
-  }
-  NOTREACHED();
-  return false;
+  planes_ = VideoFrame::kNumYUVPlanes;
+  COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
+  data_[VideoFrame::kYPlane] = data;
+  data_[VideoFrame::kUPlane] = data + y_bytes;
+  data_[VideoFrame::kVPlane] = data + y_bytes + uv_bytes;
+  strides_[VideoFrame::kYPlane] = y_bytes_per_row;
+  strides_[VideoFrame::kUPlane] = uv_stride;
+  strides_[VideoFrame::kVPlane] = uv_stride;
 }
 
 VideoFrame::VideoFrame(VideoFrame::SurfaceType type,
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index ab552895..a694c5f 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -57,43 +57,42 @@ class VideoFrame : public StreamSample {
 
   // Creates a new frame in system memory with given parameters. Buffers for
   // the frame are allocated but not initialized.
-  static void CreateFrame(Format format,
-                          size_t width,
-                          size_t height,
-                          base::TimeDelta timestamp,
-                          base::TimeDelta duration,
-                          scoped_refptr<VideoFrame>* frame_out);
+  static scoped_refptr<VideoFrame> CreateFrame(
+      Format format,
+      size_t width,
+      size_t height,
+      base::TimeDelta timestamp,
+      base::TimeDelta duration);
 
   // Creates a new frame with given parameters. Buffers for the frame are
   // provided externally. Reference to the buffers and strides are copied
   // from |data| and |strides| respectively.
-  static void CreateFrameExternal(SurfaceType type,
-                                  Format format,
-                                  size_t width,
-                                  size_t height,
-                                  size_t planes,
-                                  uint8* const data[kMaxPlanes],
-                                  const int32 strides[kMaxPlanes],
-                                  base::TimeDelta timestamp,
-                                  base::TimeDelta duration,
-                                  void* private_buffer,
-                                  scoped_refptr<VideoFrame>* frame_out);
+  static scoped_refptr<VideoFrame> CreateFrameExternal(
+      SurfaceType type,
+      Format format,
+      size_t width,
+      size_t height,
+      size_t planes,
+      uint8* const data[kMaxPlanes],
+      const int32 strides[kMaxPlanes],
+      base::TimeDelta timestamp,
+      base::TimeDelta duration,
+      void* private_buffer);
 
   // Creates a new frame with GL textures.
-  static void CreateFrameGlTexture(Format format,
-                                   size_t width,
-                                   size_t height,
-                                   GlTexture const textures[kMaxPlanes],
-                                   scoped_refptr<VideoFrame>* frame_out);
+  static scoped_refptr<VideoFrame> CreateFrameGlTexture(
+      Format format,
+      size_t width,
+      size_t height,
+      GlTexture const textures[kMaxPlanes]);
 
   // Creates a frame with format equals to VideoFrame::EMPTY, width, height
   // timestamp and duration are all 0.
-  static void CreateEmptyFrame(scoped_refptr<VideoFrame>* frame_out);
+  static scoped_refptr<VideoFrame> CreateEmptyFrame();
 
   // Allocates YV12 frame based on |width| and |height|, and sets its data to
   // the YUV equivalent of RGB(0,0,0).
-  static void CreateBlackFrame(int width, int height,
-                               scoped_refptr<VideoFrame>* frame_out);
+  static scoped_refptr<VideoFrame> CreateBlackFrame(int width, int height);
 
   SurfaceType type() const { return type_; }
 
@@ -130,8 +129,8 @@ class VideoFrame : public StreamSample {
   virtual ~VideoFrame();
 
   // Used internally by CreateFrame().
-  bool AllocateRGB(size_t bytes_per_pixel);
-  bool AllocateYUV();
+  void AllocateRGB(size_t bytes_per_pixel);
+  void AllocateYUV();
 
   // Frame format.
   Format format_;
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index a889271..8f6b4e7 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -52,12 +52,11 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
              yv12_frame->stride(VideoFrame::kVPlane));
 
   scoped_refptr<media::VideoFrame> rgb_frame;
-  media::VideoFrame::CreateFrame(VideoFrame::RGBA,
-                                 yv12_frame->width(),
-                                 yv12_frame->height(),
-                                 yv12_frame->GetTimestamp(),
-                                 yv12_frame->GetDuration(),
-                                 &rgb_frame);
+  rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGBA,
+                                             yv12_frame->width(),
+                                             yv12_frame->height(),
+                                             yv12_frame->GetTimestamp(),
+                                             yv12_frame->GetDuration());
 
   ASSERT_EQ(yv12_frame->width(), rgb_frame->width());
   ASSERT_EQ(yv12_frame->height(), rgb_frame->height());
@@ -95,9 +94,9 @@ TEST(VideoFrame, CreateFrame) {
   const base::TimeDelta kDurationB = base::TimeDelta::FromMicroseconds(5678);
 
   // Create a YV12 Video Frame.
-  scoped_refptr<media::VideoFrame> frame;
-  VideoFrame::CreateFrame(media::VideoFrame::YV12, kWidth, kHeight,
-                          kTimestampA, kDurationA, &frame);
+  scoped_refptr<media::VideoFrame> frame =
+      VideoFrame::CreateFrame(media::VideoFrame::YV12, kWidth, kHeight,
+                              kTimestampA, kDurationA);
   ASSERT_TRUE(frame);
 
   // Test StreamSample implementation.
@@ -129,7 +128,7 @@ TEST(VideoFrame, CreateFrame) {
   }
 
   // Test an empty frame.
-  VideoFrame::CreateEmptyFrame(&frame);
+  frame = VideoFrame::CreateEmptyFrame();
   EXPECT_TRUE(frame->IsEndOfStream());
 }
 
@@ -139,8 +138,8 @@ TEST(VideoFrame, CreateBlackFrame) {
   const uint8 kExpectedYRow[] = { 0, 0 };
   const uint8 kExpectedUVRow[] = { 128 };
 
-  scoped_refptr<media::VideoFrame> frame;
-  VideoFrame::CreateBlackFrame(kWidth, kHeight, &frame);
+  scoped_refptr<media::VideoFrame> frame =
+      VideoFrame::CreateBlackFrame(kWidth, kHeight);
   ASSERT_TRUE(frame);
 
   // Test basic properties.
@@ -174,14 +173,14 @@ TEST(VideoFrame, CreateBlackFrame) {
 
 TEST(VideoFram, CreateExternalFrame) {
   scoped_array<uint8> memory(new uint8[1]);
-  scoped_refptr<media::VideoFrame> frame;
 
   uint8* data[3] = {memory.get(), NULL, NULL};
   int strides[3] = {1, 0, 0};
-  VideoFrame::CreateFrameExternal(media::VideoFrame::TYPE_SYSTEM_MEMORY,
-                                  media::VideoFrame::RGB32, 0, 0, 3,
-                                  data, strides,
-                                  base::TimeDelta(), base::TimeDelta(),
-                                  NULL, &frame);
+  scoped_refptr<media::VideoFrame> frame =
+      VideoFrame::CreateFrameExternal(media::VideoFrame::TYPE_SYSTEM_MEMORY,
+                                      media::VideoFrame::RGB32, 0, 0, 3,
+                                      data, strides,
+                                      base::TimeDelta(), base::TimeDelta(),
+                                      NULL);
   ASSERT_TRUE(frame);
 
   // Test frame properties.
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 449b6aa..cc888d0 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -297,9 +297,7 @@ void FFmpegVideoDecoder::ProduceVideoFrame(
   // If the decoding is finished, we just always return empty frames.
   if (state_ == kDecodeFinished) {
     // Signal VideoRenderer the end of the stream event.
-    scoped_refptr<VideoFrame> empty_frame;
-    VideoFrame::CreateEmptyFrame(&empty_frame);
-    VideoFrameReady(empty_frame);
+    VideoFrameReady(VideoFrame::CreateEmptyFrame());
     // Fall through, because we still need to keep record of this frame.
   }
 
@@ -338,9 +336,7 @@ void FFmpegVideoDecoder::ConsumeVideoFrame(
       state_ = kDecodeFinished;
 
      // Signal VideoRenderer the end of the stream event.
-      scoped_refptr<VideoFrame> video_frame;
-      VideoFrame::CreateEmptyFrame(&video_frame);
-      VideoFrameReady(video_frame);
+      VideoFrameReady(VideoFrame::CreateEmptyFrame());
     }
   }
 }
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index 38411d3..3aa7769 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -159,9 +159,8 @@ class FFmpegVideoDecoderTest : public testing::Test {
     memset(&codec_, 0, sizeof(codec_));
     memset(&yuv_frame_, 0, sizeof(yuv_frame_));
     base::TimeDelta zero;
-    VideoFrame::CreateFrame(VideoFrame::YV12, kWidth, kHeight,
-                            zero, zero, &video_frame_);
-
+    video_frame_ = VideoFrame::CreateFrame(VideoFrame::YV12, kWidth, kHeight,
+                                           zero, zero);
     stream_.codec = &codec_context_;
     codec_context_.width = kWidth;
     codec_context_.height = kHeight;
diff --git a/media/filters/video_renderer_base_unittest.cc b/media/filters/video_renderer_base_unittest.cc
index 8831da1..3605011 100644
--- a/media/filters/video_renderer_base_unittest.cc
+++ b/media/filters/video_renderer_base_unittest.cc
@@ -155,11 +155,10 @@ class VideoRendererBaseTest : public ::testing::Test {
 
   void CreateFrame(int64 timestamp, int64 duration) {
     const base::TimeDelta kZero;
-    scoped_refptr<VideoFrame> frame;
-    VideoFrame::CreateFrame(VideoFrame::RGB32, kWidth, kHeight,
-                            base::TimeDelta::FromMicroseconds(timestamp),
-                            base::TimeDelta::FromMicroseconds(duration),
-                            &frame);
+    scoped_refptr<VideoFrame> frame =
+        VideoFrame::CreateFrame(VideoFrame::RGB32, kWidth, kHeight,
+                                base::TimeDelta::FromMicroseconds(timestamp),
+                                base::TimeDelta::FromMicroseconds(duration));
     decoder_->VideoFrameReadyForTest(frame);
   }
 
diff --git a/media/tools/scaler_bench/scaler_bench.cc b/media/tools/scaler_bench/scaler_bench.cc
index 5a5e093..c794f3a 100644
--- a/media/tools/scaler_bench/scaler_bench.cc
+++ b/media/tools/scaler_bench/scaler_bench.cc
@@ -33,9 +33,8 @@ static double BenchmarkSkia() {
   std::vector<scoped_refptr<VideoFrame> > source_frames;
   ScopedVector<SkBitmap> dest_frames;
   for (int i = 0; i < num_buffers; i++) {
-    scoped_refptr<VideoFrame> source_frame;
-    VideoFrame::CreateBlackFrame(source_width, source_height, &source_frame);
-    source_frames.push_back(source_frame);
+    source_frames.push_back(
+        VideoFrame::CreateBlackFrame(source_width, source_height));
 
     SkBitmap* bitmap = new SkBitmap();
     bitmap->setConfig(SkBitmap::kARGB_8888_Config,
@@ -118,18 +117,15 @@ static double BenchmarkFilter(media::ScaleFilter filter) {
   std::vector<scoped_refptr<VideoFrame> > dest_frames;
 
   for (int i = 0; i < num_buffers; i++) {
-    scoped_refptr<VideoFrame> source_frame;
-    VideoFrame::CreateBlackFrame(source_width, source_height, &source_frame);
-    source_frames.push_back(source_frame);
-
-    scoped_refptr<VideoFrame> dest_frame;
-    VideoFrame::CreateFrame(VideoFrame::RGB32,
-                            dest_width,
-                            dest_height,
-                            TimeDelta::FromSeconds(0),
-                            TimeDelta::FromSeconds(0),
-                            &dest_frame);
-    dest_frames.push_back(dest_frame);
+    source_frames.push_back(
+        VideoFrame::CreateBlackFrame(source_width, source_height));
+
+    dest_frames.push_back(
+        VideoFrame::CreateFrame(VideoFrame::RGB32,
+                                dest_width,
+                                dest_height,
+                                TimeDelta::FromSeconds(0),
+                                TimeDelta::FromSeconds(0)));
   }
 
   TimeTicks start = TimeTicks::HighResNow();
diff --git a/media/tools/shader_bench/cpu_color_painter.cc b/media/tools/shader_bench/cpu_color_painter.cc
index 5b8f240..7743e7e 100644
--- a/media/tools/shader_bench/cpu_color_painter.cc
+++ b/media/tools/shader_bench/cpu_color_painter.cc
@@ -63,13 +63,12 @@ void CPUColorPainter::Initialize(int width, int height) {
 
 void CPUColorPainter::Paint(scoped_refptr<media::VideoFrame> video_frame) {
   // Convert to RGBA frame.
-  scoped_refptr<media::VideoFrame> rgba_frame;
-  media::VideoFrame::CreateFrame(media::VideoFrame::RGBA,
-                                 video_frame->width(),
-                                 video_frame->height(),
-                                 base::TimeDelta(),
-                                 base::TimeDelta(),
-                                 &rgba_frame);
+  scoped_refptr<media::VideoFrame> rgba_frame =
+      media::VideoFrame::CreateFrame(media::VideoFrame::RGBA,
+                                     video_frame->width(),
+                                     video_frame->height(),
+                                     base::TimeDelta(),
+                                     base::TimeDelta());
 
   media::ConvertYUVToRGB32(video_frame->data(media::VideoFrame::kYPlane),
                            video_frame->data(media::VideoFrame::kUPlane),
diff --git a/media/tools/shader_bench/shader_bench.cc b/media/tools/shader_bench/shader_bench.cc
index 948b373..88905e9 100644
--- a/media/tools/shader_bench/shader_bench.cc
+++ b/media/tools/shader_bench/shader_bench.cc
@@ -52,14 +52,12 @@ void GetFrames(std::string file_name,
   long frame_size = CalculateYUVFrameSize(file_handle, num_frames);
 
   for (int i = 0; i < num_frames; i++) {
-    scoped_refptr<media::VideoFrame> video_frame;
-
-    media::VideoFrame::CreateFrame(media::VideoFrame::YV12,
-                                   width,
-                                   height,
-                                   base::TimeDelta(),
-                                   base::TimeDelta(),
-                                   &video_frame);
+    scoped_refptr<media::VideoFrame> video_frame =
+        media::VideoFrame::CreateFrame(media::VideoFrame::YV12,
+                                       width,
+                                       height,
+                                       base::TimeDelta(),
+                                       base::TimeDelta());
 
     long bytes_read = fread(video_frame->data(0), 1, frame_size, file_handle);
 
diff --git a/media/video/ffmpeg_video_decode_engine.cc b/media/video/ffmpeg_video_decode_engine.cc
index b584f78..ef56987 100644
--- a/media/video/ffmpeg_video_decode_engine.cc
+++ b/media/video/ffmpeg_video_decode_engine.cc
@@ -109,29 +109,22 @@ void FFmpegVideoDecodeEngine::Initialize(
   info.stream_info.surface_height = config.surface_height();
 
   // If we do not have enough buffers, we will report error too.
-  bool buffer_allocated = true;
   frame_queue_available_.clear();
 
   // Create output buffer pool when direct rendering is not used.
   for (size_t i = 0; i < Limits::kMaxVideoFrames; ++i) {
-    scoped_refptr<VideoFrame> video_frame;
-    VideoFrame::CreateFrame(VideoFrame::YV12,
-                            config.width(),
-                            config.height(),
-                            kNoTimestamp,
-                            kNoTimestamp,
-                            &video_frame);
-    if (!video_frame.get()) {
-      buffer_allocated = false;
-      break;
-    }
+    scoped_refptr<VideoFrame> video_frame =
+        VideoFrame::CreateFrame(VideoFrame::YV12,
+                                config.width(),
+                                config.height(),
+                                kNoTimestamp,
+                                kNoTimestamp);
     frame_queue_available_.push_back(video_frame);
   }
 
   codec_context_->thread_count = decode_threads;
 
   if (codec &&
       avcodec_open(codec_context_, codec) >= 0 &&
-      av_frame_.get() &&
-      buffer_allocated) {
+      av_frame_.get()) {
     info.success = true;
   }
   event_handler_ = event_handler;
diff --git a/media/video/ffmpeg_video_decode_engine_unittest.cc b/media/video/ffmpeg_video_decode_engine_unittest.cc
index 2527d4f..01016d4 100644
--- a/media/video/ffmpeg_video_decode_engine_unittest.cc
+++ b/media/video/ffmpeg_video_decode_engine_unittest.cc
@@ -68,12 +68,11 @@ class FFmpegVideoDecodeEngineTest
     test_engine_.reset(new FFmpegVideoDecodeEngine());
 
-    VideoFrame::CreateFrame(VideoFrame::YV12,
-                            kWidth,
-                            kHeight,
-                            kNoTimestamp,
-                            kNoTimestamp,
-                            &video_frame_);
+    video_frame_ = VideoFrame::CreateFrame(VideoFrame::YV12,
+                                           kWidth,
+                                           kHeight,
+                                           kNoTimestamp,
+                                           kNoTimestamp);
   }
 
   ~FFmpegVideoDecodeEngineTest() {
diff --git a/remoting/base/codec_test.cc b/remoting/base/codec_test.cc
index 38593fb..d1b4978 100644
--- a/remoting/base/codec_test.cc
+++ b/remoting/base/codec_test.cc
@@ -121,10 +121,10 @@ class DecoderTester {
   DecoderTester(Decoder* decoder)
       : strict_(false),
         decoder_(decoder) {
-    media::VideoFrame::CreateFrame(media::VideoFrame::RGB32,
-                                   kWidth, kHeight,
-                                   base::TimeDelta(),
-                                   base::TimeDelta(), &frame_);
+    frame_ = media::VideoFrame::CreateFrame(media::VideoFrame::RGB32,
+                                            kWidth, kHeight,
+                                            base::TimeDelta(),
+                                            base::TimeDelta());
     EXPECT_TRUE(frame_.get());
     decoder_->Initialize(frame_);
   }
diff --git a/remoting/client/plugin/pepper_view.cc b/remoting/client/plugin/pepper_view.cc
index 714c1e0..bf5ae1c 100644
--- a/remoting/client/plugin/pepper_view.cc
+++ b/remoting/client/plugin/pepper_view.cc
@@ -430,13 +430,11 @@ void PepperView::AllocateFrame(media::VideoFrame::Format format,
                                Task* done) {
   DCHECK(CurrentlyOnPluginThread());
 
-  media::VideoFrame::CreateFrame(media::VideoFrame::RGB32,
-                                 width, height,
-                                 base::TimeDelta(), base::TimeDelta(),
-                                 frame_out);
-  if (*frame_out) {
-    (*frame_out)->AddRef();
-  }
+  *frame_out = media::VideoFrame::CreateFrame(media::VideoFrame::RGB32,
+                                              width, height,
+                                              base::TimeDelta(),
+                                              base::TimeDelta());
+  (*frame_out)->AddRef();
 
   done->Run();
   delete done;
 }
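The repeated inline buffer fills this change deletes (for example in `RTCVideoDecoder::Seek`) all wrote the YUV equivalent of black, which `CreateBlackFrame()` now does in one place. A self-contained sketch of that fill, with simplified plane layout (stride equal to width, contiguous per-plane buffers) rather than the aligned, padded strides `AllocateYUV()` actually computes:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
      const int width = 16;
      const int height = 16;
      const uint8_t kBlackY = 0x00;   // Luma 0.
      const uint8_t kBlackUV = 0x80;  // Neutral chroma 128.

      // YV12: a full-resolution Y plane plus U and V planes sampled at
      // half resolution in both dimensions.
      std::vector<uint8_t> y_plane(width * height);
      std::vector<uint8_t> u_plane((width / 2) * (height / 2));
      std::vector<uint8_t> v_plane((width / 2) * (height / 2));

      // YUV(0, 128, 128) is the equivalent of RGB(0, 0, 0).
      memset(y_plane.data(), kBlackY, y_plane.size());
      memset(u_plane.data(), kBlackUV, u_plane.size());
      memset(v_plane.data(), kBlackUV, v_plane.size());
      return 0;
    }

In the real code the fill walks each plane row by row and advances by `stride(...)` rather than `width`, because `AllocateYUV()` rounds Y rows up to a 4-byte boundary and adds `kFramePadBytes` of tail padding for SIMD conversion.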