author     sergeyu <sergeyu@chromium.org>        2015-08-20 15:47:37 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-08-20 22:48:03 +0000
commit     ef92de20194ecc989930346de7d2c9beb0a88e6e (patch)
tree       5cdbfa291544f82a384092d5d91cc8c54ed50275 /remoting/codec
parent     1ed0cbab7569ef9327bab4ebca140df67870a5e3 (diff)
Simplify VideoDecoder interface
Previously, VideoDecoder allowed decoded frames to be resized. That functionality is no longer used anywhere, so the interface can be simplified.

BUG=256850, 486917
Review URL: https://codereview.chromium.org/1301233002
Cr-Commit-Position: refs/heads/master@{#344603}
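For orientation, here is a minimal caller-side sketch of the simplified interface. All names come from the new remoting/codec/video_decoder.h and codec_test.cc in the diff below; the incoming VideoPacket is assumed to announce the screen size in its format header (only packets that change the size do), and the surrounding setup is elided.

// Sketch only: the caller now owns the output frame, pre-allocates it at the
// size carried in the packet header, and the decoder renders directly into it.
scoped_ptr<VideoDecoderVpx> decoder = VideoDecoderVpx::CreateForVP8();
webrtc::DesktopSize size(packet.format().screen_width(),
                         packet.format().screen_height());
scoped_ptr<webrtc::DesktopFrame> frame(new webrtc::BasicDesktopFrame(size));
if (!decoder->DecodePacket(packet, frame.get()))
  LOG(ERROR) << "Invalid packet received.";
// frame->updated_region() now describes the dirty rects rendered into |frame|.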
Diffstat (limited to 'remoting/codec')
-rw-r--r--  remoting/codec/codec_test.cc                 | 133
-rw-r--r--  remoting/codec/codec_test.h                  |   1
-rw-r--r--  remoting/codec/video_decoder.h               |  48
-rw-r--r--  remoting/codec/video_decoder_verbatim.cc     | 110
-rw-r--r--  remoting/codec/video_decoder_verbatim.h      |  23
-rw-r--r--  remoting/codec/video_decoder_vpx.cc          | 231
-rw-r--r--  remoting/codec/video_decoder_vpx.h           |  27
-rw-r--r--  remoting/codec/video_decoder_vpx_unittest.cc |  76
8 files changed, 165 insertions(+), 484 deletions(-)
diff --git a/remoting/codec/codec_test.cc b/remoting/codec/codec_test.cc
index 47e64e5..cec0043 100644
--- a/remoting/codec/codec_test.cc
+++ b/remoting/codec/codec_test.cc
@@ -9,10 +9,11 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/video_frame.h"
+#include "remoting/base/util.h"
#include "remoting/codec/codec_test.h"
#include "remoting/codec/video_decoder.h"
#include "remoting/codec/video_encoder.h"
-#include "remoting/base/util.h"
+#include "remoting/proto/video.pb.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
@@ -54,53 +55,32 @@ namespace remoting {
class VideoDecoderTester {
public:
- VideoDecoderTester(VideoDecoder* decoder,
- const DesktopSize& screen_size,
- const DesktopSize& view_size)
- : screen_size_(screen_size),
- view_size_(view_size),
- strict_(false),
+ VideoDecoderTester(VideoDecoder* decoder, const DesktopSize& screen_size)
+ : strict_(false),
decoder_(decoder),
- frame_(nullptr) {
- image_data_.reset(new uint8[
- view_size_.width() * view_size_.height() * kBytesPerPixel]);
- EXPECT_TRUE(image_data_.get());
- }
+ frame_(new BasicDesktopFrame(screen_size)),
+ expected_frame_(nullptr) {}
void Reset() {
+ frame_.reset(new BasicDesktopFrame(frame_->size()));
expected_region_.Clear();
- update_region_.Clear();
}
void ResetRenderedData() {
- memset(image_data_.get(), 0,
- view_size_.width() * view_size_.height() * kBytesPerPixel);
- }
-
- void ReceivedPacket(VideoPacket* packet) {
- ASSERT_TRUE(decoder_->DecodePacket(*packet));
-
- RenderFrame();
- }
-
- void RenderFrame() {
- decoder_->RenderFrame(
- DesktopSize(view_size_.width(), view_size_.height()),
- DesktopRect::MakeWH(view_size_.width(), view_size_.height()),
- image_data_.get(), view_size_.width() * kBytesPerPixel,
- &update_region_);
+ memset(frame_->data(), 0,
+ frame_->size().width() * frame_->size().height() * kBytesPerPixel);
}
- void ReceivedScopedPacket(scoped_ptr<VideoPacket> packet) {
- ReceivedPacket(packet.get());
+ void ReceivedPacket(scoped_ptr<VideoPacket> packet) {
+ ASSERT_TRUE(decoder_->DecodePacket(*packet, frame_.get()));
}
void set_strict(bool strict) {
strict_ = strict;
}
- void set_frame(DesktopFrame* frame) {
- frame_ = frame;
+ void set_expected_frame(DesktopFrame* frame) {
+ expected_frame_ = frame;
}
void AddRegion(const DesktopRegion& region) {
@@ -111,25 +91,23 @@ class VideoDecoderTester {
if (!strict_)
return;
- ASSERT_TRUE(frame_);
+ ASSERT_TRUE(expected_frame_);
// Test the content of the update region.
- EXPECT_TRUE(expected_region_.Equals(update_region_));
+ EXPECT_TRUE(expected_region_.Equals(frame_->updated_region()));
- for (DesktopRegion::Iterator i(update_region_); !i.IsAtEnd();
+ for (DesktopRegion::Iterator i(frame_->updated_region()); !i.IsAtEnd();
i.Advance()) {
- const int stride = view_size_.width() * kBytesPerPixel;
- EXPECT_EQ(stride, frame_->stride());
- const int offset = stride * i.rect().top() +
- kBytesPerPixel * i.rect().left();
- const uint8* original = frame_->data() + offset;
- const uint8* decoded = image_data_.get() + offset;
+ const uint8_t* original =
+ expected_frame_->GetFrameDataAtPos(i.rect().top_left());
+ const uint8_t* decoded =
+ frame_->GetFrameDataAtPos(i.rect().top_left());
const int row_size = kBytesPerPixel * i.rect().width();
for (int y = 0; y < i.rect().height(); ++y) {
EXPECT_EQ(0, memcmp(original, decoded, row_size))
<< "Row " << y << " is different";
- original += stride;
- decoded += stride;
+ original += expected_frame_->stride();
+ decoded += frame_->stride();
}
}
}
@@ -137,27 +115,26 @@ class VideoDecoderTester {
// The error at each pixel is the root mean square of the errors in
// the R, G, and B components, each normalized to [0, 1]. This routine
// checks that the maximum and mean pixel errors do not exceed given limits.
- void VerifyResultsApprox(const uint8* expected_view_data,
- double max_error_limit, double mean_error_limit) {
+ void VerifyResultsApprox(double max_error_limit, double mean_error_limit) {
double max_error = 0.0;
double sum_error = 0.0;
int error_num = 0;
- for (DesktopRegion::Iterator i(update_region_); !i.IsAtEnd();
+ for (DesktopRegion::Iterator i(frame_->updated_region()); !i.IsAtEnd();
i.Advance()) {
- const int stride = view_size_.width() * kBytesPerPixel;
- const int offset = stride * i.rect().top() +
- kBytesPerPixel * i.rect().left();
- const uint8* expected = expected_view_data + offset;
- const uint8* actual = image_data_.get() + offset;
+ const uint8_t* expected =
+ expected_frame_->GetFrameDataAtPos(i.rect().top_left());
+ const uint8_t* actual =
+ frame_->GetFrameDataAtPos(i.rect().top_left());
for (int y = 0; y < i.rect().height(); ++y) {
for (int x = 0; x < i.rect().width(); ++x) {
- double error = CalculateError(expected, actual);
+ double error = CalculateError(expected + x * kBytesPerPixel,
+ actual + x * kBytesPerPixel);
max_error = std::max(max_error, error);
sum_error += error;
++error_num;
- expected += 4;
- actual += 4;
}
+ expected += expected_frame_->stride();
+ actual += frame_->stride();
}
}
EXPECT_LE(max_error, max_error_limit);
@@ -167,7 +144,7 @@ class VideoDecoderTester {
VLOG(0) << "Mean error: " << mean_error;
}
- double CalculateError(const uint8* original, const uint8* decoded) {
+ double CalculateError(const uint8_t* original, const uint8_t* decoded) {
double error_sum_squares = 0.0;
for (int i = 0; i < 3; i++) {
double error = static_cast<double>(*original++) -
@@ -181,14 +158,11 @@ class VideoDecoderTester {
}
private:
- DesktopSize screen_size_;
- DesktopSize view_size_;
bool strict_;
DesktopRegion expected_region_;
- DesktopRegion update_region_;
VideoDecoder* decoder_;
- scoped_ptr<uint8[]> image_data_;
- DesktopFrame* frame_;
+ scoped_ptr<DesktopFrame> frame_;
+ DesktopFrame* expected_frame_;
DISALLOW_COPY_AND_ASSIGN(VideoDecoderTester);
};
@@ -207,7 +181,7 @@ class VideoEncoderTester {
++data_available_;
// Send the message to the VideoDecoderTester.
if (decoder_tester_) {
- decoder_tester_->ReceivedPacket(packet.get());
+ decoder_tester_->ReceivedPacket(packet.Pass());
}
}
@@ -300,7 +274,7 @@ static void TestEncodeDecodeRects(VideoEncoder* encoder,
srand(0);
for (DesktopRegion::Iterator i(region); !i.IsAtEnd(); i.Advance()) {
const int row_size = DesktopFrame::kBytesPerPixel * i.rect().width();
- uint8* memory = frame->data() + frame->stride() * i.rect().top() +
+ uint8_t* memory = frame->data() + frame->stride() * i.rect().top() +
DesktopFrame::kBytesPerPixel * i.rect().left();
for (int y = 0; y < i.rect().height(); ++y) {
for (int x = 0; x < row_size; ++x)
@@ -324,9 +298,9 @@ void TestVideoEncoderDecoder(VideoEncoder* encoder,
scoped_ptr<DesktopFrame> frame = PrepareFrame(kSize);
- VideoDecoderTester decoder_tester(decoder, kSize, kSize);
+ VideoDecoderTester decoder_tester(decoder, kSize);
decoder_tester.set_strict(strict);
- decoder_tester.set_frame(frame.get());
+ decoder_tester.set_expected_frame(frame.get());
encoder_tester.set_decoder_tester(&decoder_tester);
for (const DesktopRegion& region : MakeTestRegionLists(kSize)) {
@@ -337,7 +311,7 @@ void TestVideoEncoderDecoder(VideoEncoder* encoder,
static void FillWithGradient(DesktopFrame* frame) {
for (int j = 0; j < frame->size().height(); ++j) {
- uint8* p = frame->data() + j * frame->stride();
+ uint8_t* p = frame->data() + j * frame->stride();
for (int i = 0; i < frame->size().width(); ++i) {
*p++ = (255.0 * i) / frame->size().width();
*p++ = (164.0 * j) / frame->size().height();
@@ -351,7 +325,6 @@ static void FillWithGradient(DesktopFrame* frame) {
void TestVideoEncoderDecoderGradient(VideoEncoder* encoder,
VideoDecoder* decoder,
const DesktopSize& screen_size,
- const DesktopSize& view_size,
double max_error_limit,
double mean_error_limit) {
scoped_ptr<BasicDesktopFrame> frame(
@@ -359,30 +332,14 @@ void TestVideoEncoderDecoderGradient(VideoEncoder* encoder,
FillWithGradient(frame.get());
frame->mutable_updated_region()->SetRect(DesktopRect::MakeSize(screen_size));
- scoped_ptr<BasicDesktopFrame> expected_result(
- new BasicDesktopFrame(view_size));
- FillWithGradient(expected_result.get());
-
- VideoDecoderTester decoder_tester(decoder, screen_size, view_size);
- decoder_tester.set_frame(frame.get());
+ VideoDecoderTester decoder_tester(decoder, screen_size);
+ decoder_tester.set_expected_frame(frame.get());
decoder_tester.AddRegion(frame->updated_region());
scoped_ptr<VideoPacket> packet = encoder->Encode(*frame);
- decoder_tester.ReceivedScopedPacket(packet.Pass());
-
- decoder_tester.VerifyResultsApprox(expected_result->data(),
- max_error_limit, mean_error_limit);
-
- // Check that the decoder correctly re-renders the frame if its client
- // invalidates the frame.
- decoder_tester.ResetRenderedData();
- decoder->Invalidate(
- DesktopSize(view_size.width(), view_size.height()),
- DesktopRegion(
- DesktopRect::MakeWH(view_size.width(), view_size.height())));
- decoder_tester.RenderFrame();
- decoder_tester.VerifyResultsApprox(expected_result->data(),
- max_error_limit, mean_error_limit);
+ decoder_tester.ReceivedPacket(packet.Pass());
+
+ decoder_tester.VerifyResultsApprox(max_error_limit, mean_error_limit);
}
float MeasureVideoEncoderFpsWithSize(VideoEncoder* encoder,
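As an aside, the pixel error metric used by VerifyResultsApprox() above is, per the comment retained in the diff, the root mean square of the R, G and B errors with each channel normalized to [0, 1]. CalculateError() in codec_test.cc is the real implementation and is only partially visible here, so the following is an illustrative sketch rather than the actual code:

#include <math.h>
#include <stdint.h>

// Sketch of the per-pixel error: RMS of the three channel errors in [0, 1].
double PixelError(const uint8_t* original, const uint8_t* decoded) {
  double sum_squares = 0.0;
  for (int i = 0; i < 3; ++i) {  // R, G, B.
    double e = (static_cast<double>(original[i]) - decoded[i]) / 255.0;
    sum_squares += e * e;
  }
  return sqrt(sum_squares / 3.0);
}

The Gradient tests further below then require the maximum of this value over the updated region to stay under 0.04 and its mean under 0.02.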
diff --git a/remoting/codec/codec_test.h b/remoting/codec/codec_test.h
index e5d5bb6..7406a58 100644
--- a/remoting/codec/codec_test.h
+++ b/remoting/codec/codec_test.h
@@ -45,7 +45,6 @@ void TestVideoEncoderDecoder(VideoEncoder* encoder,
void TestVideoEncoderDecoderGradient(VideoEncoder* encoder,
VideoDecoder* decoder,
const webrtc::DesktopSize& screen_size,
- const webrtc::DesktopSize& view_size,
double max_error_limit,
double mean_error_limit);
diff --git a/remoting/codec/video_decoder.h b/remoting/codec/video_decoder.h
index 378abb00..65d488e 100644
--- a/remoting/codec/video_decoder.h
+++ b/remoting/codec/video_decoder.h
@@ -6,57 +6,25 @@
#define REMOTING_CODEC_VIDEO_DECODER_H_
#include "base/basictypes.h"
-#include "remoting/proto/video.pb.h"
namespace webrtc {
-class DesktopRect;
-class DesktopRegion;
-class DesktopSize;
+class DesktopFrame;
} // namespace webrtc
namespace remoting {
-// Interface for a decoder that takes a stream of bytes from the network and
-// outputs frames of data.
-// TODO(sergeyu): Simplify this interface.
+class VideoPacket;
+
+// Interface for a decoder that decodes video packets.
class VideoDecoder {
public:
- static const int kBytesPerPixel = 4;
-
VideoDecoder() {}
virtual ~VideoDecoder() {}
- // Feeds more data into the decoder. Returns true if |packet| was processed
- // and the frame can be displayed now.
- virtual bool DecodePacket(const VideoPacket& packet) = 0;
-
- // Marks the specified |region| of the view for update the next time
- // RenderFrame() is called. |region| is expressed in |view_size| coordinates.
- // |view_size| must not be empty.
- virtual void Invalidate(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRegion& region) = 0;
-
- // Copies invalidated pixels within |clip_area| to |image_buffer|. Pixels are
- // invalidated either by new data received in DecodePacket(), or by explicit
- // calls to Invalidate(). |clip_area| is specified in |view_size| coordinates.
- // If |view_size| differs from the source size then the copied pixels will be
- // scaled accordingly. |view_size| cannot be empty.
- //
- // |image_buffer|'s origin must correspond to the top-left of |clip_area|,
- // and the buffer must be large enough to hold |clip_area| RGBA32 pixels.
- // |image_stride| gives the output buffer's stride in pixels.
- //
- // On return, |output_region| contains the updated area, in |view_size|
- // coordinates.
- virtual void RenderFrame(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRect& clip_area,
- uint8* image_buffer,
- int image_stride,
- webrtc::DesktopRegion* output_region) = 0;
-
- // Returns the "shape", if any, of the most recently rendered frame.
- // The shape is returned in source dimensions.
- virtual const webrtc::DesktopRegion* GetImageShape() = 0;
+ // Decodes a video frame. Returns false in case of a failure. The caller must
+ // pre-allocate a |frame| with the size specified in the |packet|.
+ virtual bool DecodePacket(const VideoPacket& packet,
+ webrtc::DesktopFrame* frame) = 0;
};
} // namespace remoting
diff --git a/remoting/codec/video_decoder_verbatim.cc b/remoting/codec/video_decoder_verbatim.cc
index b35013a..0d7e8da 100644
--- a/remoting/codec/video_decoder_verbatim.cc
+++ b/remoting/codec/video_decoder_verbatim.cc
@@ -6,109 +6,55 @@
#include "base/logging.h"
#include "remoting/base/util.h"
+#include "remoting/proto/video.pb.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"
namespace remoting {
-VideoDecoderVerbatim::VideoDecoderVerbatim() {}
+static const int kBytesPerPixel = 4;
+VideoDecoderVerbatim::VideoDecoderVerbatim() {}
VideoDecoderVerbatim::~VideoDecoderVerbatim() {}
-bool VideoDecoderVerbatim::DecodePacket(const VideoPacket& packet) {
- if (packet.format().has_screen_width() &&
- packet.format().has_screen_height()) {
- webrtc::DesktopSize screen_size(packet.format().screen_width(),
- packet.format().screen_height());
- // Allocate the screen buffer, if necessary.
- if (!screen_size.equals(screen_size_)) {
- screen_size_ = screen_size;
- screen_buffer_.reset(new uint8[screen_size_.width() *
- screen_size_.height() * kBytesPerPixel]);
- updated_region_.Clear();
- }
- }
-
- webrtc::DesktopRegion region;
-
- const char* in = packet.data().data();
- int stride = kBytesPerPixel * screen_size_.width();
+bool VideoDecoderVerbatim::DecodePacket(const VideoPacket& packet,
+ webrtc::DesktopFrame* frame) {
+ webrtc::DesktopRegion* region = frame->mutable_updated_region();
+ region->Clear();
+ const char* current_data_pos = packet.data().data();
for (int i = 0; i < packet.dirty_rects_size(); ++i) {
Rect proto_rect = packet.dirty_rects(i);
webrtc::DesktopRect rect =
- webrtc::DesktopRect::MakeXYWH(proto_rect.x(),
- proto_rect.y(),
- proto_rect.width(),
- proto_rect.height());
- region.AddRect(rect);
+ webrtc::DesktopRect::MakeXYWH(proto_rect.x(), proto_rect.y(),
+ proto_rect.width(), proto_rect.height());
+ region->AddRect(rect);
- if (!DoesRectContain(webrtc::DesktopRect::MakeSize(screen_size_), rect)) {
- LOG(ERROR) << "Invalid packet received";
+ if (!DoesRectContain(webrtc::DesktopRect::MakeSize(frame->size()), rect)) {
+ LOG(ERROR) << "Invalid packet received.";
return false;
}
int rect_row_size = kBytesPerPixel * rect.width();
- uint8_t* out = screen_buffer_.get() + rect.top() * stride +
- rect.left() * kBytesPerPixel;
- for (int y = rect.top(); y < rect.top() + rect.height(); ++y) {
- if (in + rect_row_size > packet.data().data() + packet.data().size()) {
- LOG(ERROR) << "Invalid packet received";
- return false;
- }
- memcpy(out, in, rect_row_size);
- in += rect_row_size;
- out += stride;
+ const char* rect_data_end =
+ current_data_pos + rect_row_size * rect.height();
+ if (rect_data_end > packet.data().data() + packet.data().size()) {
+ LOG(ERROR) << "Invalid packet received.";
+ return false;
}
+
+ uint8_t* source =
+ reinterpret_cast<uint8_t*>(const_cast<char*>(current_data_pos));
+ frame->CopyPixelsFrom(source, rect_row_size, rect);
+ current_data_pos = rect_data_end;
}
- if (in != packet.data().data() + packet.data().size()) {
- LOG(ERROR) << "Invalid packet received";
+ if (current_data_pos != packet.data().data() + packet.data().size()) {
+ LOG(ERROR) << "Invalid packet received.";
return false;
}
- updated_region_.AddRegion(region);
-
return true;
}
-void VideoDecoderVerbatim::Invalidate(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRegion& region) {
- updated_region_.AddRegion(region);
-}
-
-void VideoDecoderVerbatim::RenderFrame(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRect& clip_area,
- uint8* image_buffer,
- int image_stride,
- webrtc::DesktopRegion* output_region) {
- output_region->Clear();
-
- // TODO(alexeypa): scaling is not implemented.
- webrtc::DesktopRect clip_rect = webrtc::DesktopRect::MakeSize(screen_size_);
- clip_rect.IntersectWith(clip_area);
- if (clip_rect.is_empty())
- return;
-
- int screen_stride = screen_size_.width() * kBytesPerPixel;
-
- for (webrtc::DesktopRegion::Iterator i(updated_region_);
- !i.IsAtEnd(); i.Advance()) {
- webrtc::DesktopRect rect(i.rect());
- rect.IntersectWith(clip_rect);
- if (rect.is_empty())
- continue;
-
- CopyRGB32Rect(screen_buffer_.get(), screen_stride,
- clip_rect,
- image_buffer, image_stride,
- clip_area,
- rect);
- output_region->AddRect(rect);
- }
-
- updated_region_.Clear();
-}
-
-const webrtc::DesktopRegion* VideoDecoderVerbatim::GetImageShape() {
- return nullptr;
-}
-
} // namespace remoting
diff --git a/remoting/codec/video_decoder_verbatim.h b/remoting/codec/video_decoder_verbatim.h
index ce3aef8..cad36ca 100644
--- a/remoting/codec/video_decoder_verbatim.h
+++ b/remoting/codec/video_decoder_verbatim.h
@@ -18,31 +18,14 @@ namespace remoting {
// video frames.
class VideoDecoderVerbatim : public VideoDecoder {
public:
- ~VideoDecoderVerbatim() override;
-
VideoDecoderVerbatim();
+ ~VideoDecoderVerbatim() override;
// VideoDecoder implementation.
- bool DecodePacket(const VideoPacket& packet) override;
- void Invalidate(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRegion& region) override;
- void RenderFrame(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRect& clip_area,
- uint8* image_buffer,
- int image_stride,
- webrtc::DesktopRegion* output_region) override;
- const webrtc::DesktopRegion* GetImageShape() override;
+ bool DecodePacket(const VideoPacket& packet,
+ webrtc::DesktopFrame* frame) override;
private:
- // The region updated that hasn't been copied to the screen yet.
- webrtc::DesktopRegion updated_region_;
-
- // Size of the remote screen.
- webrtc::DesktopSize screen_size_;
-
- // The bitmap holding the remote screen bits.
- scoped_ptr<uint8[]> screen_buffer_;
-
DISALLOW_COPY_AND_ASSIGN(VideoDecoderVerbatim);
};
diff --git a/remoting/codec/video_decoder_vpx.cc b/remoting/codec/video_decoder_vpx.cc
index a54c704..cba3b20 100644
--- a/remoting/codec/video_decoder_vpx.cc
+++ b/remoting/codec/video_decoder_vpx.cc
@@ -6,13 +6,13 @@
#include <math.h>
-#include <algorithm>
-
#include "base/logging.h"
-#include "media/base/media.h"
-#include "media/base/yuv_convert.h"
#include "remoting/base/util.h"
+#include "remoting/proto/video.pb.h"
#include "third_party/libyuv/include/libyuv/convert_argb.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
+#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"
extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
@@ -22,6 +22,50 @@ extern "C" {
namespace remoting {
+namespace {
+
+void RenderRect(vpx_image_t* image,
+ webrtc::DesktopRect rect,
+ webrtc::DesktopFrame* frame) {
+ switch (image->fmt) {
+ case VPX_IMG_FMT_I420: {
+ // Align position of the top left corner so that its coordinates are
+ // always even.
+ rect = webrtc::DesktopRect::MakeLTRB(rect.left() & ~1, rect.top() & ~1,
+ rect.right(), rect.bottom());
+ uint8_t* image_data_ptr = frame->GetFrameDataAtPos(rect.top_left());
+ int y_offset = rect.top() * image->stride[0] + rect.left();
+ int u_offset = rect.top() / 2 * image->stride[1] + rect.left() / 2;
+ int v_offset = rect.top() / 2 * image->stride[2] + rect.left() / 2;
+ libyuv::I420ToARGB(image->planes[0] + y_offset, image->stride[0],
+ image->planes[1] + u_offset, image->stride[1],
+ image->planes[2] + v_offset, image->stride[2],
+ image_data_ptr, frame->stride(),
+ rect.width(), rect.height());
+ break;
+ }
+ // VP8 only outputs I420 frames, but VP9 can also produce I444.
+ case VPX_IMG_FMT_I444: {
+ uint8_t* image_data_ptr = frame->GetFrameDataAtPos(rect.top_left());
+ int y_offset = rect.top() * image->stride[0] + rect.left();
+ int u_offset = rect.top() * image->stride[1] + rect.left();
+ int v_offset = rect.top() * image->stride[2] + rect.left();
+ libyuv::I444ToARGB(image->planes[0] + y_offset, image->stride[0],
+ image->planes[1] + u_offset, image->stride[1],
+ image->planes[2] + v_offset, image->stride[2],
+ image_data_ptr, frame->stride(),
+ rect.width(), rect.height());
+ break;
+ }
+ default: {
+ LOG(ERROR) << "Unsupported image format:" << image->fmt;
+ return;
+ }
+ }
+}
+
+} // namespace
+
// static
scoped_ptr<VideoDecoderVpx> VideoDecoderVpx::CreateForVP8() {
return make_scoped_ptr(new VideoDecoderVpx(vpx_codec_vp8_dx()));
@@ -34,7 +78,8 @@ scoped_ptr<VideoDecoderVpx> VideoDecoderVpx::CreateForVP9() {
VideoDecoderVpx::~VideoDecoderVpx() {}
-bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
+bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet,
+ webrtc::DesktopFrame* frame) {
// Pass the packet to the codec to process.
vpx_codec_err_t ret = vpx_codec_decode(
codec_.get(), reinterpret_cast<const uint8*>(packet.data().data()),
@@ -49,22 +94,27 @@ bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
// Fetch the decoded video frame.
vpx_codec_iter_t iter = nullptr;
- image_ = vpx_codec_get_frame(codec_.get(), &iter);
- if (!image_) {
- LOG(ERROR) << "No video frame decoded";
+ vpx_image_t* image = vpx_codec_get_frame(codec_.get(), &iter);
+ if (!image) {
+ LOG(ERROR) << "No video frame decoded.";
+ return false;
+ }
+ if (!webrtc::DesktopSize(image->d_w, image->d_h).equals(frame->size())) {
+ LOG(ERROR) << "Size of the encoded frame doesn't match size in the header.";
return false;
}
- DCHECK(!image_size().is_empty());
// Determine which areas have been updated.
- webrtc::DesktopRegion region;
+ webrtc::DesktopRegion* region = frame->mutable_updated_region();
+ region->Clear();
for (int i = 0; i < packet.dirty_rects_size(); ++i) {
- Rect remoting_rect = packet.dirty_rects(i);
- region.AddRect(webrtc::DesktopRect::MakeXYWH(
- remoting_rect.x(), remoting_rect.y(),
- remoting_rect.width(), remoting_rect.height()));
+ Rect proto_rect = packet.dirty_rects(i);
+ webrtc::DesktopRect rect =
+ webrtc::DesktopRect::MakeXYWH(proto_rect.x(), proto_rect.y(),
+ proto_rect.width(), proto_rect.height());
+ region->AddRect(rect);
+ RenderRect(image, rect, frame);
}
- updated_region_.AddRegion(region);
// Process the frame shape, if supplied.
if (packet.has_use_desktop_shape()) {
@@ -73,155 +123,23 @@ bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
desktop_shape_ = make_scoped_ptr(new webrtc::DesktopRegion);
desktop_shape_->Clear();
for (int i = 0; i < packet.desktop_shape_rects_size(); ++i) {
- Rect remoting_rect = packet.desktop_shape_rects(i);
+ Rect proto_rect = packet.desktop_shape_rects(i);
desktop_shape_->AddRect(webrtc::DesktopRect::MakeXYWH(
- remoting_rect.x(), remoting_rect.y(), remoting_rect.width(),
- remoting_rect.height()));
+ proto_rect.x(), proto_rect.y(), proto_rect.width(),
+ proto_rect.height()));
}
} else {
desktop_shape_.reset();
}
}
- return true;
-}
-
-void VideoDecoderVpx::Invalidate(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRegion& region) {
- DCHECK(!view_size.is_empty());
-
- for (webrtc::DesktopRegion::Iterator i(region); !i.IsAtEnd(); i.Advance()) {
- updated_region_.AddRect(ScaleRect(i.rect(), view_size, image_size()));
- }
-}
-
-void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRect& clip_area,
- uint8* image_buffer,
- int image_stride,
- webrtc::DesktopRegion* output_region) {
- DCHECK(!image_size().is_empty());
- DCHECK(!view_size.is_empty());
+ if (desktop_shape_)
+ frame->set_shape(new webrtc::DesktopRegion(*desktop_shape_));
- // Early-return and do nothing if we haven't yet decoded any frames.
- if (!image_)
- return;
-
- webrtc::DesktopRect source_clip = webrtc::DesktopRect::MakeSize(image_size());
-
- // VP8 only outputs I420 frames, but VP9 can also produce I444.
- switch (image_->fmt) {
- case VPX_IMG_FMT_I444: {
- // TODO(wez): Add scaling support to the I444 conversion path.
- if (view_size.equals(image_size())) {
- for (webrtc::DesktopRegion::Iterator i(updated_region_);
- !i.IsAtEnd(); i.Advance()) {
- // Determine the scaled area affected by this rectangle changing.
- webrtc::DesktopRect rect = i.rect();
- rect.IntersectWith(source_clip);
- rect.IntersectWith(clip_area);
- if (rect.is_empty())
- continue;
-
- int image_offset = image_stride * rect.top() +
- rect.left() * VideoDecoder::kBytesPerPixel;
- int y_offset = image_->stride[0] * rect.top() + rect.left();
- int u_offset = image_->stride[1] * rect.top() + rect.left();
- int v_offset = image_->stride[2] * rect.top() + rect.left();
- libyuv::I444ToARGB(image_->planes[0] + y_offset, image_->stride[0],
- image_->planes[1] + u_offset, image_->stride[1],
- image_->planes[2] + v_offset, image_->stride[2],
- image_buffer + image_offset, image_stride,
- rect.width(), rect.height());
-
- output_region->AddRect(rect);
- }
- }
- break;
- }
- case VPX_IMG_FMT_I420: {
- // ScaleYUVToRGB32WithRect does not currently support up-scaling. We
- // won't be asked to up-scale except during resizes or if page zoom is
- // >100%, so we work-around the limitation by using the slower
- // ScaleYUVToRGB32.
- // TODO(wez): Remove this hack if/when ScaleYUVToRGB32WithRect can
- // up-scale.
- if (!updated_region_.is_empty() &&
- (source_clip.width() < view_size.width() ||
- source_clip.height() < view_size.height())) {
- // We're scaling only |clip_area| into the |image_buffer|, so we need to
- // work out which source rectangle that corresponds to.
- webrtc::DesktopRect source_rect =
- ScaleRect(clip_area, view_size, image_size());
- source_rect = webrtc::DesktopRect::MakeLTRB(
- RoundToTwosMultiple(source_rect.left()),
- RoundToTwosMultiple(source_rect.top()),
- source_rect.right(),
- source_rect.bottom());
-
- // If there were no changes within the clip source area then don't
- // render.
- webrtc::DesktopRegion intersection(source_rect);
- intersection.IntersectWith(updated_region_);
- if (intersection.is_empty())
- return;
-
- // Scale & convert the entire clip area.
- int y_offset = CalculateYOffset(source_rect.left(), source_rect.top(),
- image_->stride[0]);
- int uv_offset = CalculateUVOffset(source_rect.left(), source_rect.top(),
- image_->stride[1]);
- ScaleYUVToRGB32(
- image_->planes[0] + y_offset, image_->planes[1] + uv_offset,
- image_->planes[2] + uv_offset, image_buffer, source_rect.width(),
- source_rect.height(), clip_area.width(), clip_area.height(),
- image_->stride[0], image_->stride[1], image_stride, media::YV12,
- media::ROTATE_0, media::FILTER_BILINEAR);
-
- output_region->AddRect(clip_area);
- updated_region_.Subtract(source_rect);
- return;
- }
-
- for (webrtc::DesktopRegion::Iterator i(updated_region_);
- !i.IsAtEnd(); i.Advance()) {
- // Determine the scaled area affected by this rectangle changing.
- webrtc::DesktopRect rect = i.rect();
- rect.IntersectWith(source_clip);
- if (rect.is_empty())
- continue;
- rect = ScaleRect(rect, image_size(), view_size);
- rect.IntersectWith(clip_area);
- if (rect.is_empty())
- continue;
-
- ConvertAndScaleYUVToRGB32Rect(
- image_->planes[0], image_->planes[1], image_->planes[2],
- image_->stride[0], image_->stride[1], image_size(), source_clip,
- image_buffer, image_stride, view_size, clip_area, rect);
-
- output_region->AddRect(rect);
- }
-
- updated_region_.Subtract(ScaleRect(clip_area, view_size, image_size()));
- break;
- }
- default: {
- LOG(ERROR) << "Unsupported image format:" << image_->fmt;
- return;
- }
- }
-
- webrtc::DesktopRect scaled_clip_area =
- ScaleRect(clip_area, view_size, image_size());
- updated_region_.Subtract(scaled_clip_area);
-}
-
-const webrtc::DesktopRegion* VideoDecoderVpx::GetImageShape() {
- return desktop_shape_.get();
+ return true;
}
-VideoDecoderVpx::VideoDecoderVpx(vpx_codec_iface_t* codec) : image_(nullptr) {
+VideoDecoderVpx::VideoDecoderVpx(vpx_codec_iface_t* codec) {
codec_.reset(new vpx_codec_ctx_t);
vpx_codec_dec_cfg config;
@@ -232,9 +150,4 @@ VideoDecoderVpx::VideoDecoderVpx(vpx_codec_iface_t* codec) : image_(nullptr) {
CHECK_EQ(VPX_CODEC_OK, ret);
}
-webrtc::DesktopSize VideoDecoderVpx::image_size() const {
- return image_ ? webrtc::DesktopSize(image_->d_w, image_->d_h)
- : webrtc::DesktopSize();
-}
-
} // namespace remoting
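One note on the RenderRect() helper added in this file: in the I420 case the top-left corner of each dirty rect is rounded down to even coordinates because the U and V planes are subsampled 2x2, so their offsets are derived from top/2 and left/2. A small worked example of the offset arithmetic (the rect position and strides are hypothetical, not values from this change):

// Illustrative only: dirty rect at (left=5, top=7), assumed y_stride=640 and
// uv_stride=320. After alignment the origin becomes (4, 6):
//   y_offset = 6 * 640 + 4           = 3844
//   u_offset = (6 / 2) * 320 + 4 / 2 = 962
//   v_offset = (6 / 2) * 320 + 4 / 2 = 962
// Starting from an odd origin, the integer divisions would truncate and the
// converted chroma would land one luma pixel off relative to the Y data and
// the ARGB destination.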
diff --git a/remoting/codec/video_decoder_vpx.h b/remoting/codec/video_decoder_vpx.h
index ea72def..81ba9e4 100644
--- a/remoting/codec/video_decoder_vpx.h
+++ b/remoting/codec/video_decoder_vpx.h
@@ -9,12 +9,15 @@
#include "base/memory/scoped_ptr.h"
#include "remoting/codec/scoped_vpx_codec.h"
#include "remoting/codec/video_decoder.h"
-#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
-#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"
typedef const struct vpx_codec_iface vpx_codec_iface_t;
typedef struct vpx_image vpx_image_t;
+namespace webrtc {
+class DesktopRect;
+class DesktopRegion;
+} // namespace webrtc
+
namespace remoting {
class VideoDecoderVpx : public VideoDecoder {
@@ -26,30 +29,14 @@ class VideoDecoderVpx : public VideoDecoder {
~VideoDecoderVpx() override;
// VideoDecoder interface.
- bool DecodePacket(const VideoPacket& packet) override;
- void Invalidate(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRegion& region) override;
- void RenderFrame(const webrtc::DesktopSize& view_size,
- const webrtc::DesktopRect& clip_area,
- uint8* image_buffer,
- int image_stride,
- webrtc::DesktopRegion* output_region) override;
- const webrtc::DesktopRegion* GetImageShape() override;
+ bool DecodePacket(const VideoPacket& packet,
+ webrtc::DesktopFrame* frame) override;
private:
explicit VideoDecoderVpx(vpx_codec_iface_t* codec);
- // Returns the dimensions of the most recent frame as a DesktopSize.
- webrtc::DesktopSize image_size() const;
-
ScopedVpxCodec codec_;
- // Pointer to the most recently decoded image.
- vpx_image_t* image_;
-
- // Area of the source that has changed since the last RenderFrame call.
- webrtc::DesktopRegion updated_region_;
-
// The shape of the most-recent frame, if any.
scoped_ptr<webrtc::DesktopRegion> desktop_shape_;
diff --git a/remoting/codec/video_decoder_vpx_unittest.cc b/remoting/codec/video_decoder_vpx_unittest.cc
index 63cf870..a7f2530 100644
--- a/remoting/codec/video_decoder_vpx_unittest.cc
+++ b/remoting/codec/video_decoder_vpx_unittest.cc
@@ -24,12 +24,10 @@ class VideoDecoderVpxTest : public testing::Test {
}
void TestGradient(int screen_width, int screen_height,
- int view_width, int view_height,
double max_error_limit, double mean_error_limit) {
TestVideoEncoderDecoderGradient(
encoder_.get(), decoder_.get(),
webrtc::DesktopSize(screen_width, screen_height),
- webrtc::DesktopSize(view_width, view_height),
max_error_limit, mean_error_limit);
}
};
@@ -64,42 +62,7 @@ TEST_F(VideoDecoderVp8Test, VideoEncodeAndDecode) {
// frame too much. The frame used is a gradient, which does not contain sharp
// transitions, so encoding lossiness should not be too high.
TEST_F(VideoDecoderVp8Test, Gradient) {
- TestGradient(320, 240, 320, 240, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleUpEvenToEven) {
- TestGradient(320, 240, 640, 480, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleUpEvenToOdd) {
- TestGradient(320, 240, 641, 481, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleUpOddToEven) {
- TestGradient(321, 241, 640, 480, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleUpOddToOdd) {
- TestGradient(321, 241, 641, 481, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleDownEvenToEven) {
- TestGradient(320, 240, 160, 120, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleDownEvenToOdd) {
- // The maximum error is non-deterministic. The mean error is not too high,
- // which suggests that the problem is restricted to a small area of the output
- // image. See crbug.com/139437 and crbug.com/139633.
- TestGradient(320, 240, 161, 121, 1.0, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleDownOddToEven) {
- TestGradient(321, 241, 160, 120, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp8Test, GradientScaleDownOddToOdd) {
- TestGradient(321, 241, 161, 121, 0.04, 0.02);
+ TestGradient(320, 240, 0.04, 0.02);
}
//
@@ -114,42 +77,7 @@ TEST_F(VideoDecoderVp9Test, VideoEncodeAndDecode) {
// frame too much. The frame used is a gradient, which does not contain sharp
// transitions, so encoding lossiness should not be too high.
TEST_F(VideoDecoderVp9Test, Gradient) {
- TestGradient(320, 240, 320, 240, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleUpEvenToEven) {
- TestGradient(320, 240, 640, 480, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleUpEvenToOdd) {
- TestGradient(320, 240, 641, 481, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleUpOddToEven) {
- TestGradient(321, 241, 640, 480, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleUpOddToOdd) {
- TestGradient(321, 241, 641, 481, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleDownEvenToEven) {
- TestGradient(320, 240, 160, 120, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleDownEvenToOdd) {
- // The maximum error is non-deterministic. The mean error is not too high,
- // which suggests that the problem is restricted to a small area of the output
- // image. See crbug.com/139437 and crbug.com/139633.
- TestGradient(320, 240, 161, 121, 1.0, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleDownOddToEven) {
- TestGradient(321, 241, 160, 120, 0.04, 0.02);
-}
-
-TEST_F(VideoDecoderVp9Test, GradientScaleDownOddToOdd) {
- TestGradient(321, 241, 161, 121, 0.04, 0.02);
+ TestGradient(320, 240, 0.04, 0.02);
}
} // namespace remoting