author     wez <wez@chromium.org>                2015-07-16 20:19:15 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-07-17 03:19:47 +0000
commit     070889be6cd0ff3425d6260fbaac59ed24f34627 (patch)
tree       c632b4fce22c055542e0b03f4ab4653ab14c0331
parent     78b7284f164e10ecb382196bf38692410d11103b (diff)
Allow shaped-desktop hosts to send shape only when it changes.
Previously hosts supplying a shaped desktop needed to attach the desktop shape to every frame, wasting bandwidth since the shape changes relatively infrequently. This CL updates the VideoRenderer implementations to preserve the shape (or lack of one) from the preceding frame if the VideoPacket does not include the use_desktop_shape field.

Also simplifies FrameConsumerProxy to remove the need for ref-counting, updates NULL->nullptr throughout remoting/codec/, and removes unnecessary transparency logic from VideoDecoderVpx.

BUG=446288

Review URL: https://codereview.chromium.org/1236663002

Cr-Commit-Position: refs/heads/master@{#339212}
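The protocol change is easiest to read as a tri-state on the VideoPacket's optional use_desktop_shape field: if the field is absent, the renderer keeps the shape (or lack of one) from the previous frame; if it is present and false, the frame is explicitly un-shaped; if it is present and true, desktop_shape_rects carries the new shape. Below is a minimal, self-contained sketch of that caching logic — not the Chromium code itself; Rect, ShapePacket and ShapeTracker are simplified stand-ins for the proto fields and the renderer-side bookkeeping in the patch that follows.

// Hedged sketch of the tri-state shape semantics (simplified stand-ins,
// not the Chromium protobuf or renderer classes).
#include <iostream>
#include <memory>
#include <optional>
#include <vector>

struct Rect {
  int x = 0, y = 0, w = 0, h = 0;
  bool operator==(const Rect& o) const {
    return x == o.x && y == o.y && w == o.w && h == o.h;
  }
};

struct ShapePacket {
  // nullopt => field absent: keep whatever shape was reported previously.
  // false   => the frame is explicitly un-shaped.
  // true    => desktop_shape_rects carries the new shape.
  std::optional<bool> use_desktop_shape;
  std::vector<Rect> desktop_shape_rects;
};

class ShapeTracker {
 public:
  // Returns true if the cached shape changed and should be re-reported.
  bool Update(const ShapePacket& packet) {
    if (!packet.use_desktop_shape.has_value())
      return false;  // Field absent: previous shape (or lack of one) persists.
    if (!*packet.use_desktop_shape) {
      bool changed = (shape_ != nullptr);
      shape_.reset();  // Explicitly un-shaped frame.
      return changed;
    }
    // Shape supplied: replace the cached one only if it actually differs
    // (the real code compares webrtc::DesktopRegion with Equals()).
    if (shape_ && *shape_ == packet.desktop_shape_rects)
      return false;
    shape_ = std::make_unique<std::vector<Rect>>(packet.desktop_shape_rects);
    return true;
  }

  // Null means "un-shaped", mirroring the new DesktopRegion* parameters.
  const std::vector<Rect>* shape() const { return shape_.get(); }

 private:
  std::unique_ptr<std::vector<Rect>> shape_;
};

int main() {
  ShapeTracker tracker;
  ShapePacket with_shape{true, {{0, 0, 640, 480}}};
  ShapePacket absent{};  // No use_desktop_shape field at all.
  std::cout << tracker.Update(with_shape) << "\n";  // 1: new shape reported.
  std::cout << tracker.Update(absent) << "\n";      // 0: shape preserved.
}

The real implementations in this patch (PepperVideoRenderer3D::ProcessVideoPacket and VideoDecoderVpx::DecodePacket) follow the same structure, but report the shape onward as a webrtc::DesktopRegion* that is null for un-shaped frames.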
-rw-r--r--  remoting/client/frame_consumer.h                    |  17
-rw-r--r--  remoting/client/frame_consumer_proxy.cc             |  56
-rw-r--r--  remoting/client/frame_consumer_proxy.h              |  21
-rw-r--r--  remoting/client/jni/chromoting_jni_instance.cc      |  22
-rw-r--r--  remoting/client/jni/chromoting_jni_instance.h       |   6
-rw-r--r--  remoting/client/jni/jni_frame_consumer.cc           |   3
-rw-r--r--  remoting/client/jni/jni_frame_consumer.h            |   2
-rw-r--r--  remoting/client/plugin/chromoting_instance.cc       |  35
-rw-r--r--  remoting/client/plugin/chromoting_instance.h        |   2
-rw-r--r--  remoting/client/plugin/pepper_video_renderer.h      |   5
-rw-r--r--  remoting/client/plugin/pepper_video_renderer_2d.cc  |  21
-rw-r--r--  remoting/client/plugin/pepper_video_renderer_2d.h   |   7
-rw-r--r--  remoting/client/plugin/pepper_video_renderer_3d.cc  |  32
-rw-r--r--  remoting/client/plugin/pepper_video_renderer_3d.h   |   2
-rw-r--r--  remoting/client/software_video_renderer.cc          |  17
-rw-r--r--  remoting/client/software_video_renderer.h           |   3
-rw-r--r--  remoting/codec/audio_decoder_opus.cc                |   5
-rw-r--r--  remoting/codec/codec_test.cc                        |   5
-rw-r--r--  remoting/codec/video_decoder_vpx.cc                 | 242
-rw-r--r--  remoting/codec/video_decoder_vpx.h                  |  27
20 files changed, 206 insertions(+), 324 deletions(-)
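The other structural change is that FrameConsumerProxy is no longer ref-counted: the proxy now captures the task runner of the thread it is constructed on (via base::ThreadTaskRunnerHandle::Get()), is handed to SoftwareVideoRenderer by scoped_ptr, and trampolines each call back to the construction thread against a WeakPtr to the consumer. A rough stand-alone analogue of that pattern, assuming nothing from Chromium — TaskQueue stands in for SingleThreadTaskRunner and std::weak_ptr for base::WeakPtr:

// Hedged, self-contained sketch of an owned (non-ref-counted) consumer proxy.
#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <string>
#include <utility>

// Stand-in for a SingleThreadTaskRunner: tasks posted here run later, on the
// thread that drains the queue.
class TaskQueue {
 public:
  void Post(std::function<void()> task) { tasks_.push(std::move(task)); }
  void RunAll() {
    while (!tasks_.empty()) {
      tasks_.front()();
      tasks_.pop();
    }
  }

 private:
  std::queue<std::function<void()>> tasks_;
};

// Stand-in for FrameConsumer.
class Consumer {
 public:
  void ApplyBuffer(const std::string& what) {
    std::cout << "painting: " << what << "\n";
  }
};

// Analogue of the de-ref-counted FrameConsumerProxy: owned by whoever holds
// the unique_ptr, it forwards calls to the queue captured at construction,
// and std::weak_ptr ensures calls are dropped if the consumer is gone.
class ConsumerProxy {
 public:
  ConsumerProxy(std::weak_ptr<Consumer> consumer, TaskQueue* home_queue)
      : consumer_(std::move(consumer)), home_queue_(home_queue) {}

  void ApplyBuffer(std::string what) {
    std::weak_ptr<Consumer> weak = consumer_;
    home_queue_->Post([weak, what] {
      if (auto consumer = weak.lock())
        consumer->ApplyBuffer(what);  // Runs on the consumer's thread.
    });
  }

 private:
  std::weak_ptr<Consumer> consumer_;
  TaskQueue* home_queue_;  // Not owned; assumed to outlive the proxy.
};

int main() {
  TaskQueue display_queue;                       // "display thread"
  auto consumer = std::make_shared<Consumer>();  // lives on the display thread

  // Construct the proxy on the display thread, then pass ownership to the
  // renderer side (in Chromium, the network/decode threads).
  auto proxy = std::make_unique<ConsumerProxy>(consumer, &display_queue);
  proxy->ApplyBuffer("frame 1");  // Posted, not run inline.

  display_queue.RunAll();  // Display thread drains its queue and paints.
}

Because the proxy holds only a weak reference that is dereferenced solely on the thread that drains the queue, plain ownership is sufficient and no thread-safe ref-counting is needed — which is exactly what the patch below removes.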
diff --git a/remoting/client/frame_consumer.h b/remoting/client/frame_consumer.h
index 0996332..ab6f16e 100644
--- a/remoting/client/frame_consumer.h
+++ b/remoting/client/frame_consumer.h
@@ -26,19 +26,20 @@ class FrameConsumer {
FORMAT_RGBA, // Used for Android's Bitmap class.
};
- // Accepts a buffer to be painted to the screen. The buffer's dimensions and
- // relative position within the frame are specified by |clip_area|. Only
- // pixels falling within |region| and the current clipping area are painted.
- // The function assumes that the passed buffer was scaled to fit a window
- // having |view_size| dimensions.
+ // Paints the contents of |buffer| into the area of the view identified
+ // by |clip_area|. |view_size| specifies the full-frame dimensions to which
+ // the |buffer|/|clip_area| portion was scaled. Implementations may be
+ // optimized to only paint pixels within the intersection of |region| and
+ // |clip_area|. If |shape| is non-NULL then it specifies the complete shape
+ // of the frame, otherwise the frame is un-shaped.
//
- // N.B. Both |clip_area| and |region| are in output coordinates relative to
- // the frame.
+ // N.B. |clip_area|, |region| and |shape| should be provided in output view
+ // coordinates.
virtual void ApplyBuffer(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) = 0;
+ const webrtc::DesktopRegion* shape) = 0;
// Accepts a buffer that couldn't be used for drawing for any reason (shutdown
// is in progress, the view area has changed, etc.). The accepted buffer can
diff --git a/remoting/client/frame_consumer_proxy.cc b/remoting/client/frame_consumer_proxy.cc
index bf70da5..21d03bf 100644
--- a/remoting/client/frame_consumer_proxy.cc
+++ b/remoting/client/frame_consumer_proxy.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_frame.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"
@@ -14,51 +15,50 @@
namespace remoting {
FrameConsumerProxy::FrameConsumerProxy(
- scoped_refptr<base::SingleThreadTaskRunner> task_runner,
const base::WeakPtr<FrameConsumer>& frame_consumer)
: frame_consumer_(frame_consumer),
- task_runner_(task_runner) {
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {
pixel_format_ = frame_consumer_->GetPixelFormat();
}
+static void DoApplyBuffer(base::WeakPtr<FrameConsumer> frame_consumer,
+ const webrtc::DesktopSize& view_size,
+ const webrtc::DesktopRect& clip_area,
+ webrtc::DesktopFrame* buffer,
+ const webrtc::DesktopRegion& region,
+ scoped_ptr<webrtc::DesktopRegion> shape) {
+ if (!frame_consumer)
+ return;
+
+ frame_consumer->ApplyBuffer(view_size, clip_area, buffer, region,
+ shape.get());
+}
+
void FrameConsumerProxy::ApplyBuffer(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) {
- if (!task_runner_->BelongsToCurrentThread()) {
- task_runner_->PostTask(FROM_HERE, base::Bind(
- &FrameConsumerProxy::ApplyBuffer, this,
- view_size, clip_area, buffer, region, shape));
- return;
- }
-
- if (frame_consumer_.get())
- frame_consumer_->ApplyBuffer(view_size, clip_area, buffer, region, shape);
+ const webrtc::DesktopRegion* shape) {
+ scoped_ptr<webrtc::DesktopRegion> shape_ptr;
+ if (shape)
+ shape_ptr = make_scoped_ptr(new webrtc::DesktopRegion(*shape));
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(DoApplyBuffer, frame_consumer_, view_size, clip_area, buffer,
+ region, base::Passed(&shape_ptr)));
}
void FrameConsumerProxy::ReturnBuffer(webrtc::DesktopFrame* buffer) {
- if (!task_runner_->BelongsToCurrentThread()) {
- task_runner_->PostTask(FROM_HERE, base::Bind(
- &FrameConsumerProxy::ReturnBuffer, this, buffer));
- return;
- }
-
- if (frame_consumer_.get())
- frame_consumer_->ReturnBuffer(buffer);
+ task_runner_->PostTask(FROM_HERE, base::Bind(&FrameConsumer::ReturnBuffer,
+ frame_consumer_, buffer));
}
void FrameConsumerProxy::SetSourceSize(
const webrtc::DesktopSize& source_size,
const webrtc::DesktopVector& source_dpi) {
- if (!task_runner_->BelongsToCurrentThread()) {
- task_runner_->PostTask(FROM_HERE, base::Bind(
- &FrameConsumerProxy::SetSourceSize, this, source_size, source_dpi));
- return;
- }
-
- if (frame_consumer_.get())
- frame_consumer_->SetSourceSize(source_size, source_dpi);
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(&FrameConsumer::SetSourceSize, frame_consumer_,
+ source_size, source_dpi));
}
FrameConsumer::PixelFormat FrameConsumerProxy::GetPixelFormat() {
diff --git a/remoting/client/frame_consumer_proxy.h b/remoting/client/frame_consumer_proxy.h
index 15b1012..35bc76a 100644
--- a/remoting/client/frame_consumer_proxy.h
+++ b/remoting/client/frame_consumer_proxy.h
@@ -10,7 +10,6 @@
#ifndef REMOTING_CLIENT_FRAME_CONSUMER_PROXY_H_
#define REMOTING_CLIENT_FRAME_CONSUMER_PROXY_H_
-#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "remoting/client/frame_consumer.h"
@@ -20,33 +19,29 @@ class SingleThreadTaskRunner;
namespace remoting {
-class FrameConsumerProxy
- : public base::RefCountedThreadSafe<FrameConsumerProxy>,
- public FrameConsumer {
+class FrameConsumerProxy : public FrameConsumer {
public:
- // Constructs a proxy for |frame_consumer| which will trampoline invocations
- // to |frame_consumer_message_loop|.
- FrameConsumerProxy(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- const base::WeakPtr<FrameConsumer>& frame_consumer);
+ // Constructs a FrameConsumer proxy which can be passed to another thread,
+ // and will direct calls to |frame_consumer| on the thread from which the
+ // proxy was constructed.
+ FrameConsumerProxy(const base::WeakPtr<FrameConsumer>& frame_consumer);
+ ~FrameConsumerProxy() override;
// FrameConsumer implementation.
void ApplyBuffer(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) override;
+ const webrtc::DesktopRegion* shape) override;
void ReturnBuffer(webrtc::DesktopFrame* buffer) override;
void SetSourceSize(const webrtc::DesktopSize& source_size,
const webrtc::DesktopVector& dpi) override;
PixelFormat GetPixelFormat() override;
private:
- friend class base::RefCountedThreadSafe<FrameConsumerProxy>;
- ~FrameConsumerProxy() override;
-
base::WeakPtr<FrameConsumer> frame_consumer_;
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
PixelFormat pixel_format_;
DISALLOW_COPY_AND_ASSIGN(FrameConsumerProxy);
diff --git a/remoting/client/jni/chromoting_jni_instance.cc b/remoting/client/jni/chromoting_jni_instance.cc
index fd19dea..a30c2f4 100644
--- a/remoting/client/jni/chromoting_jni_instance.cc
+++ b/remoting/client/jni/chromoting_jni_instance.cc
@@ -387,28 +387,28 @@ void ChromotingJniInstance::ConnectToHostOnDisplayThread() {
view_.reset(new JniFrameConsumer(jni_runtime_, this));
view_weak_factory_.reset(new base::WeakPtrFactory<JniFrameConsumer>(
view_.get()));
- frame_consumer_ = new FrameConsumerProxy(jni_runtime_->display_task_runner(),
- view_weak_factory_->GetWeakPtr());
+ scoped_ptr<FrameConsumerProxy> frame_consumer =
+ make_scoped_ptr(new FrameConsumerProxy(view_weak_factory_->GetWeakPtr()));
jni_runtime_->network_task_runner()->PostTask(
FROM_HERE,
- base::Bind(&ChromotingJniInstance::ConnectToHostOnNetworkThread,
- this));
+ base::Bind(&ChromotingJniInstance::ConnectToHostOnNetworkThread, this,
+ base::Passed(&frame_consumer)));
}
-void ChromotingJniInstance::ConnectToHostOnNetworkThread() {
+void ChromotingJniInstance::ConnectToHostOnNetworkThread(
+ scoped_ptr<FrameConsumerProxy> frame_consumer) {
DCHECK(jni_runtime_->network_task_runner()->BelongsToCurrentThread());
+ DCHECK(frame_consumer);
jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
- client_context_.reset(new ClientContext(
- jni_runtime_->network_task_runner().get()));
+ client_context_.reset(new ClientContext(jni_runtime_->network_task_runner()));
client_context_->Start();
- SoftwareVideoRenderer* renderer =
- new SoftwareVideoRenderer(client_context_->main_task_runner(),
- client_context_->decode_task_runner(),
- frame_consumer_);
+ SoftwareVideoRenderer* renderer = new SoftwareVideoRenderer(
+ client_context_->main_task_runner(),
+ client_context_->decode_task_runner(), frame_consumer.Pass());
view_->set_frame_producer(renderer);
video_renderer_.reset(renderer);
diff --git a/remoting/client/jni/chromoting_jni_instance.h b/remoting/client/jni/chromoting_jni_instance.h
index 4ac7b1d..2ce50e0 100644
--- a/remoting/client/jni/chromoting_jni_instance.h
+++ b/remoting/client/jni/chromoting_jni_instance.h
@@ -119,7 +119,8 @@ class ChromotingJniInstance
~ChromotingJniInstance() override;
void ConnectToHostOnDisplayThread();
- void ConnectToHostOnNetworkThread();
+ void ConnectToHostOnNetworkThread(
+ scoped_ptr<FrameConsumerProxy> frame_consumer);
void DisconnectFromHostOnNetworkThread();
// Notifies the user interface that the user needs to enter a PIN. The
@@ -150,9 +151,8 @@ class ChromotingJniInstance
std::string host_jid_;
// This group of variables is to be used on the display thread.
- scoped_refptr<FrameConsumerProxy> frame_consumer_;
scoped_ptr<JniFrameConsumer> view_;
- scoped_ptr<base::WeakPtrFactory<JniFrameConsumer> > view_weak_factory_;
+ scoped_ptr<base::WeakPtrFactory<JniFrameConsumer>> view_weak_factory_;
// This group of variables is to be used on the network thread.
scoped_ptr<ClientContext> client_context_;
diff --git a/remoting/client/jni/jni_frame_consumer.cc b/remoting/client/jni/jni_frame_consumer.cc
index badb152..d921bfb 100644
--- a/remoting/client/jni/jni_frame_consumer.cc
+++ b/remoting/client/jni/jni_frame_consumer.cc
@@ -46,8 +46,9 @@ void JniFrameConsumer::ApplyBuffer(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) {
+ const webrtc::DesktopRegion* shape) {
DCHECK(jni_runtime_->display_task_runner()->BelongsToCurrentThread());
+ DCHECK(!shape);
if (bitmap_->size().width() != buffer->size().width() ||
bitmap_->size().height() != buffer->size().height()) {
diff --git a/remoting/client/jni/jni_frame_consumer.h b/remoting/client/jni/jni_frame_consumer.h
index e69501cf..a9a0361 100644
--- a/remoting/client/jni/jni_frame_consumer.h
+++ b/remoting/client/jni/jni_frame_consumer.h
@@ -44,7 +44,7 @@ class JniFrameConsumer : public FrameConsumer {
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) override;
+ const webrtc::DesktopRegion* shape) override;
void ReturnBuffer(webrtc::DesktopFrame* buffer) override;
void SetSourceSize(const webrtc::DesktopSize& source_size,
const webrtc::DesktopVector& dpi) override;
diff --git a/remoting/client/plugin/chromoting_instance.cc b/remoting/client/plugin/chromoting_instance.cc
index 7321047..9e8c291 100644
--- a/remoting/client/plugin/chromoting_instance.cc
+++ b/remoting/client/plugin/chromoting_instance.cc
@@ -438,26 +438,29 @@ void ChromotingInstance::OnVideoSize(const webrtc::DesktopSize& size,
PostLegacyJsonMessage("onDesktopSize", data.Pass());
}
-void ChromotingInstance::OnVideoShape(const webrtc::DesktopRegion& shape) {
- if (desktop_shape_ && shape.Equals(*desktop_shape_))
+void ChromotingInstance::OnVideoShape(const webrtc::DesktopRegion* shape) {
+ if ((shape && desktop_shape_ && shape->Equals(*desktop_shape_)) ||
+ (!shape && !desktop_shape_)) {
return;
+ }
- desktop_shape_.reset(new webrtc::DesktopRegion(shape));
-
- scoped_ptr<base::ListValue> rects_value(new base::ListValue());
- for (webrtc::DesktopRegion::Iterator i(shape); !i.IsAtEnd(); i.Advance()) {
- const webrtc::DesktopRect& rect = i.rect();
- scoped_ptr<base::ListValue> rect_value(new base::ListValue());
- rect_value->AppendInteger(rect.left());
- rect_value->AppendInteger(rect.top());
- rect_value->AppendInteger(rect.width());
- rect_value->AppendInteger(rect.height());
- rects_value->Append(rect_value.release());
+ scoped_ptr<base::DictionaryValue> shape_message(new base::DictionaryValue());
+ if (shape) {
+ desktop_shape_ = make_scoped_ptr(new webrtc::DesktopRegion(*shape));
+ scoped_ptr<base::ListValue> rects_value(new base::ListValue());
+ for (webrtc::DesktopRegion::Iterator i(*shape); !i.IsAtEnd(); i.Advance()) {
+ const webrtc::DesktopRect& rect = i.rect();
+ scoped_ptr<base::ListValue> rect_value(new base::ListValue());
+ rect_value->AppendInteger(rect.left());
+ rect_value->AppendInteger(rect.top());
+ rect_value->AppendInteger(rect.width());
+ rect_value->AppendInteger(rect.height());
+ rects_value->Append(rect_value.release());
+ }
+ shape_message->Set("rects", rects_value.release());
}
- scoped_ptr<base::DictionaryValue> data(new base::DictionaryValue());
- data->Set("rects", rects_value.release());
- PostLegacyJsonMessage("onDesktopShape", data.Pass());
+ PostLegacyJsonMessage("onDesktopShape", shape_message.Pass());
}
void ChromotingInstance::OnVideoFrameDirtyRegion(
diff --git a/remoting/client/plugin/chromoting_instance.h b/remoting/client/plugin/chromoting_instance.h
index 3c01a83..dc8216f 100644
--- a/remoting/client/plugin/chromoting_instance.h
+++ b/remoting/client/plugin/chromoting_instance.h
@@ -131,7 +131,7 @@ class ChromotingInstance : public ClientUserInterface,
void OnVideoFirstFrameReceived() override;
void OnVideoSize(const webrtc::DesktopSize& size,
const webrtc::DesktopVector& dpi) override;
- void OnVideoShape(const webrtc::DesktopRegion& shape) override;
+ void OnVideoShape(const webrtc::DesktopRegion* shape) override;
void OnVideoFrameDirtyRegion(
const webrtc::DesktopRegion& dirty_region) override;
diff --git a/remoting/client/plugin/pepper_video_renderer.h b/remoting/client/plugin/pepper_video_renderer.h
index 86713a5..bc26bc0 100644
--- a/remoting/client/plugin/pepper_video_renderer.h
+++ b/remoting/client/plugin/pepper_video_renderer.h
@@ -40,8 +40,9 @@ class PepperVideoRenderer : public VideoRenderer {
virtual void OnVideoSize(const webrtc::DesktopSize& size,
const webrtc::DesktopVector& dpi) = 0;
- // Called when desktop shape changes.
- virtual void OnVideoShape(const webrtc::DesktopRegion& shape) = 0;
+ // Called when desktop shape changes. |shape| should be NULL if frames are
+ // un-shaped.
+ virtual void OnVideoShape(const webrtc::DesktopRegion* shape) = 0;
// Called with each frame's updated region, if EnableDebugDirtyRegion(true)
// was called.
diff --git a/remoting/client/plugin/pepper_video_renderer_2d.cc b/remoting/client/plugin/pepper_video_renderer_2d.cc
index 678e829..e7d6f24 100644
--- a/remoting/client/plugin/pepper_video_renderer_2d.cc
+++ b/remoting/client/plugin/pepper_video_renderer_2d.cc
@@ -100,11 +100,11 @@ bool PepperVideoRenderer2D::Initialize(pp::Instance* instance,
instance_ = instance;
event_handler_ = event_handler;
- frame_consumer_proxy_ = new FrameConsumerProxy(
- context.main_task_runner(), weak_factory_.GetWeakPtr());
+ scoped_ptr<FrameConsumerProxy> frame_consumer_proxy =
+ make_scoped_ptr(new FrameConsumerProxy(weak_factory_.GetWeakPtr()));
software_video_renderer_.reset(new SoftwareVideoRenderer(
context.main_task_runner(), context.decode_task_runner(),
- frame_consumer_proxy_));
+ frame_consumer_proxy.Pass()));
return true;
}
@@ -207,7 +207,7 @@ void PepperVideoRenderer2D::ApplyBuffer(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) {
+ const webrtc::DesktopRegion* shape) {
DCHECK(CalledOnValidThread());
if (!frame_received_) {
@@ -216,15 +216,20 @@ void PepperVideoRenderer2D::ApplyBuffer(const webrtc::DesktopSize& view_size,
}
// We cannot use the data in the buffer if its dimensions don't match the
// current view size.
- // TODO(alexeypa): We could rescale and draw it (or even draw it without
- // rescaling) to reduce the perceived lag while we are waiting for
- // the properly scaled data.
if (!view_size_.equals(view_size)) {
FreeBuffer(buffer);
AllocateBuffers();
} else {
FlushBuffer(clip_area, buffer, region);
- event_handler_->OnVideoShape(shape);
+ if (shape) {
+ if (!source_shape_ || !source_shape_->Equals(*shape)) {
+ source_shape_ = make_scoped_ptr(new webrtc::DesktopRegion(*shape));
+ event_handler_->OnVideoShape(source_shape_.get());
+ }
+ } else if (source_shape_) {
+ source_shape_ = nullptr;
+ event_handler_->OnVideoShape(nullptr);
+ }
}
}
diff --git a/remoting/client/plugin/pepper_video_renderer_2d.h b/remoting/client/plugin/pepper_video_renderer_2d.h
index bae9cb3..21ac4ec 100644
--- a/remoting/client/plugin/pepper_video_renderer_2d.h
+++ b/remoting/client/plugin/pepper_video_renderer_2d.h
@@ -30,7 +30,6 @@ class DesktopFrame;
namespace remoting {
-class FrameConsumerProxy;
class SoftwareVideoRenderer;
// Video renderer that wraps SoftwareVideoRenderer and displays it using Pepper
@@ -60,7 +59,7 @@ class PepperVideoRenderer2D : public PepperVideoRenderer,
const webrtc::DesktopRect& clip_area,
webrtc::DesktopFrame* buffer,
const webrtc::DesktopRegion& region,
- const webrtc::DesktopRegion& shape) override;
+ const webrtc::DesktopRegion* shape) override;
void ReturnBuffer(webrtc::DesktopFrame* buffer) override;
void SetSourceSize(const webrtc::DesktopSize& source_size,
const webrtc::DesktopVector& dpi) override;
@@ -92,7 +91,6 @@ class PepperVideoRenderer2D : public PepperVideoRenderer,
pp::Graphics2D graphics2d_;
- scoped_refptr<FrameConsumerProxy> frame_consumer_proxy_;
scoped_ptr<SoftwareVideoRenderer> software_video_renderer_;
// List of allocated image buffers.
@@ -126,6 +124,9 @@ class PepperVideoRenderer2D : public PepperVideoRenderer,
// Resolution of the most recent source frame dots-per-inch.
webrtc::DesktopVector source_dpi_;
+ // Shape of the most recent source frame.
+ scoped_ptr<webrtc::DesktopRegion> source_shape_;
+
// True if there is already a Flush() pending on the Graphics2D context.
bool flush_pending_;
diff --git a/remoting/client/plugin/pepper_video_renderer_3d.cc b/remoting/client/plugin/pepper_video_renderer_3d.cc
index a40ebac..720f3a24 100644
--- a/remoting/client/plugin/pepper_video_renderer_3d.cc
+++ b/remoting/client/plugin/pepper_video_renderer_3d.cc
@@ -238,24 +238,24 @@ void PepperVideoRenderer3D::ProcessVideoPacket(scoped_ptr<VideoPacket> packet,
if (resolution_changed)
event_handler_->OnVideoSize(frame_size_, frame_dpi_);
- // Update the desktop shape region.
- webrtc::DesktopRegion desktop_shape;
+ // Process the frame shape, if supplied.
if (packet->has_use_desktop_shape()) {
- for (int i = 0; i < packet->desktop_shape_rects_size(); ++i) {
- Rect remoting_rect = packet->desktop_shape_rects(i);
- desktop_shape.AddRect(webrtc::DesktopRect::MakeXYWH(
- remoting_rect.x(), remoting_rect.y(),
- remoting_rect.width(), remoting_rect.height()));
+ if (packet->use_desktop_shape()) {
+ scoped_ptr<webrtc::DesktopRegion> shape(new webrtc::DesktopRegion);
+ for (int i = 0; i < packet->desktop_shape_rects_size(); ++i) {
+ Rect remoting_rect = packet->desktop_shape_rects(i);
+ shape->AddRect(webrtc::DesktopRect::MakeXYWH(
+ remoting_rect.x(), remoting_rect.y(), remoting_rect.width(),
+ remoting_rect.height()));
+ }
+ if (!frame_shape_ || !frame_shape_->Equals(*shape)) {
+ frame_shape_ = shape.Pass();
+ event_handler_->OnVideoShape(frame_shape_.get());
+ }
+ } else if (frame_shape_) {
+ frame_shape_ = nullptr;
+ event_handler_->OnVideoShape(nullptr);
}
- } else {
- // Fallback for the case when the host didn't include the desktop shape.
- desktop_shape =
- webrtc::DesktopRegion(webrtc::DesktopRect::MakeSize(frame_size_));
- }
-
- if (!desktop_shape_.Equals(desktop_shape)) {
- desktop_shape_.Swap(&desktop_shape);
- event_handler_->OnVideoShape(desktop_shape_);
}
// Report the dirty region, for debugging, if requested.
diff --git a/remoting/client/plugin/pepper_video_renderer_3d.h b/remoting/client/plugin/pepper_video_renderer_3d.h
index afe3c93..5783c44 100644
--- a/remoting/client/plugin/pepper_video_renderer_3d.h
+++ b/remoting/client/plugin/pepper_video_renderer_3d.h
@@ -103,7 +103,7 @@ class PepperVideoRenderer3D : public PepperVideoRenderer,
webrtc::DesktopSize frame_size_;
webrtc::DesktopVector frame_dpi_;
- webrtc::DesktopRegion desktop_shape_;
+ scoped_ptr<webrtc::DesktopRegion> frame_shape_;
webrtc::DesktopSize view_size_;
diff --git a/remoting/client/software_video_renderer.cc b/remoting/client/software_video_renderer.cc
index 748c2bf..05edf5b 100644
--- a/remoting/client/software_video_renderer.cc
+++ b/remoting/client/software_video_renderer.cc
@@ -80,7 +80,7 @@ class SoftwareVideoRenderer::Core {
public:
Core(scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner,
- scoped_refptr<FrameConsumerProxy> consumer);
+ scoped_ptr<FrameConsumerProxy> consumer);
~Core();
void OnSessionConfig(const protocol::SessionConfig& config);
@@ -104,7 +104,7 @@ class SoftwareVideoRenderer::Core {
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner_;
- scoped_refptr<FrameConsumerProxy> consumer_;
+ scoped_ptr<FrameConsumerProxy> consumer_;
scoped_ptr<VideoDecoder> decoder_;
// Remote screen size in pixels.
@@ -129,13 +129,12 @@ class SoftwareVideoRenderer::Core {
SoftwareVideoRenderer::Core::Core(
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner,
- scoped_refptr<FrameConsumerProxy> consumer)
+ scoped_ptr<FrameConsumerProxy> consumer)
: main_task_runner_(main_task_runner),
decode_task_runner_(decode_task_runner),
- consumer_(consumer),
+ consumer_(consumer.Pass()),
paint_scheduled_(false),
- weak_factory_(this) {
-}
+ weak_factory_(this) {}
SoftwareVideoRenderer::Core::~Core() {
}
@@ -242,7 +241,7 @@ void SoftwareVideoRenderer::Core::DoPaint() {
if (!output_region.is_empty()) {
buffers_.pop_front();
consumer_->ApplyBuffer(view_size_, clip_area_, buffer, output_region,
- *decoder_->GetImageShape());
+ decoder_->GetImageShape());
}
}
@@ -315,9 +314,9 @@ void SoftwareVideoRenderer::Core::SetOutputSizeAndClip(
SoftwareVideoRenderer::SoftwareVideoRenderer(
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner,
- scoped_refptr<FrameConsumerProxy> consumer)
+ scoped_ptr<FrameConsumerProxy> consumer)
: decode_task_runner_(decode_task_runner),
- core_(new Core(main_task_runner, decode_task_runner, consumer)),
+ core_(new Core(main_task_runner, decode_task_runner, consumer.Pass())),
latest_event_timestamp_(0),
weak_factory_(this) {
DCHECK(CalledOnValidThread());
diff --git a/remoting/client/software_video_renderer.h b/remoting/client/software_video_renderer.h
index 11be6dc..e6a9a5e 100644
--- a/remoting/client/software_video_renderer.h
+++ b/remoting/client/software_video_renderer.h
@@ -36,11 +36,10 @@ class SoftwareVideoRenderer : public VideoRenderer,
// outputting to |consumer|. The |main_task_runner_| is responsible for
// receiving and queueing packets. The |decode_task_runner_| is responsible
// for decoding the video packets.
- // TODO(wez): Replace the ref-counted proxy with an owned FrameConsumer.
SoftwareVideoRenderer(
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> decode_task_runner,
- scoped_refptr<FrameConsumerProxy> consumer);
+ scoped_ptr<FrameConsumerProxy> consumer);
~SoftwareVideoRenderer() override;
// VideoRenderer interface.
diff --git a/remoting/codec/audio_decoder_opus.cc b/remoting/codec/audio_decoder_opus.cc
index 656d4ac..e7bd5ac 100644
--- a/remoting/codec/audio_decoder_opus.cc
+++ b/remoting/codec/audio_decoder_opus.cc
@@ -26,10 +26,7 @@ const AudioPacket::SamplingRate kSamplingRate =
} // namespace
AudioDecoderOpus::AudioDecoderOpus()
- : sampling_rate_(0),
- channels_(0),
- decoder_(nullptr) {
-}
+ : sampling_rate_(0), channels_(0), decoder_(nullptr) {}
AudioDecoderOpus::~AudioDecoderOpus() {
DestroyDecoder();
diff --git a/remoting/codec/codec_test.cc b/remoting/codec/codec_test.cc
index 7c3dbbc..a97c51f 100644
--- a/remoting/codec/codec_test.cc
+++ b/remoting/codec/codec_test.cc
@@ -205,10 +205,7 @@ class VideoDecoderTester {
// the message to other subprograms for validaton.
class VideoEncoderTester {
public:
- VideoEncoderTester()
- : decoder_tester_(nullptr),
- data_available_(0) {
- }
+ VideoEncoderTester() : decoder_tester_(nullptr), data_available_(0) {}
~VideoEncoderTester() {
EXPECT_GT(data_available_, 0);
diff --git a/remoting/codec/video_decoder_vpx.cc b/remoting/codec/video_decoder_vpx.cc
index 7a8020f..b14d888 100644
--- a/remoting/codec/video_decoder_vpx.cc
+++ b/remoting/codec/video_decoder_vpx.cc
@@ -22,80 +22,25 @@ extern "C" {
namespace remoting {
-namespace {
-
-const uint32 kTransparentColor = 0;
-
-// Fills the rectangle |rect| with the given ARGB color |color| in |buffer|.
-void FillRect(uint8* buffer,
- int stride,
- const webrtc::DesktopRect& rect,
- uint32 color) {
- uint32* ptr = reinterpret_cast<uint32*>(buffer + (rect.top() * stride) +
- (rect.left() * VideoDecoder::kBytesPerPixel));
- int width = rect.width();
- for (int height = rect.height(); height > 0; --height) {
- std::fill(ptr, ptr + width, color);
- ptr += stride / VideoDecoder::kBytesPerPixel;
- }
-}
-
-} // namespace
-
// static
scoped_ptr<VideoDecoderVpx> VideoDecoderVpx::CreateForVP8() {
- ScopedVpxCodec codec(new vpx_codec_ctx_t);
-
- // TODO(hclam): Scale the number of threads with number of cores of the
- // machine.
- vpx_codec_dec_cfg config;
- config.w = 0;
- config.h = 0;
- config.threads = 2;
- vpx_codec_err_t ret =
- vpx_codec_dec_init(codec.get(), vpx_codec_vp8_dx(), &config, 0);
- if (ret != VPX_CODEC_OK) {
- LOG(ERROR) << "Cannot initialize codec.";
- return nullptr;
- }
-
- return make_scoped_ptr(new VideoDecoderVpx(codec.Pass()));
+ return make_scoped_ptr(new VideoDecoderVpx(vpx_codec_vp8_dx()));
}
// static
scoped_ptr<VideoDecoderVpx> VideoDecoderVpx::CreateForVP9() {
- ScopedVpxCodec codec(new vpx_codec_ctx_t);
-
- // TODO(hclam): Scale the number of threads with number of cores of the
- // machine.
- vpx_codec_dec_cfg config;
- config.w = 0;
- config.h = 0;
- config.threads = 2;
- vpx_codec_err_t ret =
- vpx_codec_dec_init(codec.get(), vpx_codec_vp9_dx(), &config, 0);
- if (ret != VPX_CODEC_OK) {
- LOG(ERROR) << "Cannot initialize codec.";
- return nullptr;
- }
-
- return make_scoped_ptr(new VideoDecoderVpx(codec.Pass()));
+ return make_scoped_ptr(new VideoDecoderVpx(vpx_codec_vp9_dx()));
}
VideoDecoderVpx::~VideoDecoderVpx() {}
-void VideoDecoderVpx::Initialize(const webrtc::DesktopSize& screen_size) {
- DCHECK(!screen_size.is_empty());
-
- screen_size_ = screen_size;
-
- transparent_region_.SetRect(webrtc::DesktopRect::MakeSize(screen_size_));
+void VideoDecoderVpx::Initialize(const webrtc::DesktopSize& source_size) {
+ // Nothing to do here; the codec handles resizing internally, and returns
+ // the source dimensions as part of the vpx_image_t.
}
bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
- DCHECK(!screen_size_.is_empty());
-
- // Do the actual decoding.
+ // Pass the packet to the codec to process.
vpx_codec_err_t ret = vpx_codec_decode(
codec_.get(), reinterpret_cast<const uint8*>(packet.data().data()),
packet.data().size(), nullptr, 0);
@@ -107,15 +52,16 @@ bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
return false;
}
- // Gets the decoded data.
+ // Fetch the decoded video frame.
vpx_codec_iter_t iter = nullptr;
- vpx_image_t* image = vpx_codec_get_frame(codec_.get(), &iter);
- if (!image) {
+ image_ = vpx_codec_get_frame(codec_.get(), &iter);
+ if (!image_) {
LOG(ERROR) << "No video frame decoded";
return false;
}
- last_image_ = image;
+ DCHECK(!image_size().is_empty());
+ // Determine which areas have been updated.
webrtc::DesktopRegion region;
for (int i = 0; i < packet.dirty_rects_size(); ++i) {
Rect remoting_rect = packet.dirty_rects(i);
@@ -123,27 +69,25 @@ bool VideoDecoderVpx::DecodePacket(const VideoPacket& packet) {
remoting_rect.x(), remoting_rect.y(),
remoting_rect.width(), remoting_rect.height()));
}
-
updated_region_.AddRegion(region);
- // Update the desktop shape region.
- webrtc::DesktopRegion desktop_shape_region;
+ // Process the frame shape, if supplied.
if (packet.has_use_desktop_shape()) {
- for (int i = 0; i < packet.desktop_shape_rects_size(); ++i) {
- Rect remoting_rect = packet.desktop_shape_rects(i);
- desktop_shape_region.AddRect(webrtc::DesktopRect::MakeXYWH(
- remoting_rect.x(), remoting_rect.y(),
- remoting_rect.width(), remoting_rect.height()));
+ if (packet.use_desktop_shape()) {
+ if (!desktop_shape_)
+ desktop_shape_ = make_scoped_ptr(new webrtc::DesktopRegion);
+ desktop_shape_->Clear();
+ for (int i = 0; i < packet.desktop_shape_rects_size(); ++i) {
+ Rect remoting_rect = packet.desktop_shape_rects(i);
+ desktop_shape_->AddRect(webrtc::DesktopRect::MakeXYWH(
+ remoting_rect.x(), remoting_rect.y(), remoting_rect.width(),
+ remoting_rect.height()));
+ }
+ } else {
+ desktop_shape_.reset();
}
- } else {
- // Fallback for the case when the host didn't include the desktop shape
- // region.
- desktop_shape_region =
- webrtc::DesktopRegion(webrtc::DesktopRect::MakeSize(screen_size_));
}
- UpdateImageShapeRegion(&desktop_shape_region);
-
return true;
}
@@ -152,15 +96,8 @@ void VideoDecoderVpx::Invalidate(const webrtc::DesktopSize& view_size,
DCHECK(!view_size.is_empty());
for (webrtc::DesktopRegion::Iterator i(region); !i.IsAtEnd(); i.Advance()) {
- updated_region_.AddRect(ScaleRect(i.rect(), view_size, screen_size_));
+ updated_region_.AddRect(ScaleRect(i.rect(), view_size, image_size()));
}
-
- // Updated areas outside of the new desktop shape region should be made
- // transparent, not repainted.
- webrtc::DesktopRegion difference = updated_region_;
- difference.Subtract(desktop_shape_);
- updated_region_.Subtract(difference);
- transparent_region_.AddRegion(difference);
}
void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
@@ -168,21 +105,20 @@ void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
uint8* image_buffer,
int image_stride,
webrtc::DesktopRegion* output_region) {
- DCHECK(!screen_size_.is_empty());
+ DCHECK(!image_size().is_empty());
DCHECK(!view_size.is_empty());
// Early-return and do nothing if we haven't yet decoded any frames.
- if (!last_image_)
+ if (!image_)
return;
- webrtc::DesktopRect source_clip =
- webrtc::DesktopRect::MakeWH(last_image_->d_w, last_image_->d_h);
+ webrtc::DesktopRect source_clip = webrtc::DesktopRect::MakeSize(image_size());
// VP8 only outputs I420 frames, but VP9 can also produce I444.
- switch (last_image_->fmt) {
+ switch (image_->fmt) {
case VPX_IMG_FMT_I444: {
// TODO(wez): Add scaling support to the I444 conversion path.
- if (view_size.equals(screen_size_)) {
+ if (view_size.equals(image_size())) {
for (webrtc::DesktopRegion::Iterator i(updated_region_);
!i.IsAtEnd(); i.Advance()) {
// Determine the scaled area affected by this rectangle changing.
@@ -194,15 +130,12 @@ void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
int image_offset = image_stride * rect.top() +
rect.left() * VideoDecoder::kBytesPerPixel;
- int y_offset = last_image_->stride[0] * rect.top() + rect.left();
- int u_offset = last_image_->stride[1] * rect.top() + rect.left();
- int v_offset = last_image_->stride[2] * rect.top() + rect.left();
- libyuv::I444ToARGB(last_image_->planes[0] + y_offset,
- last_image_->stride[0],
- last_image_->planes[1] + u_offset,
- last_image_->stride[1],
- last_image_->planes[2] + v_offset,
- last_image_->stride[2],
+ int y_offset = image_->stride[0] * rect.top() + rect.left();
+ int u_offset = image_->stride[1] * rect.top() + rect.left();
+ int v_offset = image_->stride[2] * rect.top() + rect.left();
+ libyuv::I444ToARGB(image_->planes[0] + y_offset, image_->stride[0],
+ image_->planes[1] + u_offset, image_->stride[1],
+ image_->planes[2] + v_offset, image_->stride[2],
image_buffer + image_offset, image_stride,
rect.width(), rect.height());
@@ -224,7 +157,7 @@ void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
// We're scaling only |clip_area| into the |image_buffer|, so we need to
// work out which source rectangle that corresponds to.
webrtc::DesktopRect source_rect =
- ScaleRect(clip_area, view_size, screen_size_);
+ ScaleRect(clip_area, view_size, image_size());
source_rect = webrtc::DesktopRect::MakeLTRB(
RoundToTwosMultiple(source_rect.left()),
RoundToTwosMultiple(source_rect.top()),
@@ -240,23 +173,15 @@ void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
// Scale & convert the entire clip area.
int y_offset = CalculateYOffset(source_rect.left(), source_rect.top(),
- last_image_->stride[0]);
+ image_->stride[0]);
int uv_offset = CalculateUVOffset(source_rect.left(), source_rect.top(),
- last_image_->stride[1]);
- ScaleYUVToRGB32(last_image_->planes[0] + y_offset,
- last_image_->planes[1] + uv_offset,
- last_image_->planes[2] + uv_offset,
- image_buffer,
- source_rect.width(),
- source_rect.height(),
- clip_area.width(),
- clip_area.height(),
- last_image_->stride[0],
- last_image_->stride[1],
- image_stride,
- media::YV12,
- media::ROTATE_0,
- media::FILTER_BILINEAR);
+ image_->stride[1]);
+ ScaleYUVToRGB32(
+ image_->planes[0] + y_offset, image_->planes[1] + uv_offset,
+ image_->planes[2] + uv_offset, image_buffer, source_rect.width(),
+ source_rect.height(), clip_area.width(), clip_area.height(),
+ image_->stride[0], image_->stride[1], image_stride, media::YV12,
+ media::ROTATE_0, media::FILTER_BILINEAR);
output_region->AddRect(clip_area);
updated_region_.Subtract(source_rect);
@@ -270,86 +195,51 @@ void VideoDecoderVpx::RenderFrame(const webrtc::DesktopSize& view_size,
rect.IntersectWith(source_clip);
if (rect.is_empty())
continue;
- rect = ScaleRect(rect, screen_size_, view_size);
+ rect = ScaleRect(rect, image_size(), view_size);
rect.IntersectWith(clip_area);
if (rect.is_empty())
continue;
- ConvertAndScaleYUVToRGB32Rect(last_image_->planes[0],
- last_image_->planes[1],
- last_image_->planes[2],
- last_image_->stride[0],
- last_image_->stride[1],
- screen_size_,
- source_clip,
- image_buffer,
- image_stride,
- view_size,
- clip_area,
- rect);
+ ConvertAndScaleYUVToRGB32Rect(
+ image_->planes[0], image_->planes[1], image_->planes[2],
+ image_->stride[0], image_->stride[1], image_size(), source_clip,
+ image_buffer, image_stride, view_size, clip_area, rect);
output_region->AddRect(rect);
}
- updated_region_.Subtract(ScaleRect(clip_area, view_size, screen_size_));
+ updated_region_.Subtract(ScaleRect(clip_area, view_size, image_size()));
break;
}
default: {
- LOG(ERROR) << "Unsupported image format:" << last_image_->fmt;
+ LOG(ERROR) << "Unsupported image format:" << image_->fmt;
return;
}
}
- for (webrtc::DesktopRegion::Iterator i(transparent_region_);
- !i.IsAtEnd(); i.Advance()) {
- // Determine the scaled area affected by this rectangle changing.
- webrtc::DesktopRect rect = i.rect();
- rect.IntersectWith(source_clip);
- if (rect.is_empty())
- continue;
- rect = ScaleRect(rect, screen_size_, view_size);
- rect.IntersectWith(clip_area);
- if (rect.is_empty())
- continue;
-
- // Fill the rectange with transparent pixels.
- FillRect(image_buffer, image_stride, rect, kTransparentColor);
- output_region->AddRect(rect);
- }
-
webrtc::DesktopRect scaled_clip_area =
- ScaleRect(clip_area, view_size, screen_size_);
+ ScaleRect(clip_area, view_size, image_size());
updated_region_.Subtract(scaled_clip_area);
- transparent_region_.Subtract(scaled_clip_area);
}
const webrtc::DesktopRegion* VideoDecoderVpx::GetImageShape() {
- return &desktop_shape_;
+ return desktop_shape_.get();
}
-VideoDecoderVpx::VideoDecoderVpx(ScopedVpxCodec codec)
- : codec_(codec.Pass()),
- last_image_(nullptr) {
- DCHECK(codec_);
+VideoDecoderVpx::VideoDecoderVpx(vpx_codec_iface_t* codec) : image_(nullptr) {
+ codec_.reset(new vpx_codec_ctx_t);
+
+ vpx_codec_dec_cfg config;
+ config.w = 0;
+ config.h = 0;
+ config.threads = 2;
+ vpx_codec_err_t ret = vpx_codec_dec_init(codec_.get(), codec, &config, 0);
+ CHECK_EQ(VPX_CODEC_OK, ret);
}
-void VideoDecoderVpx::UpdateImageShapeRegion(
- webrtc::DesktopRegion* new_desktop_shape) {
- // Add all areas that have been updated or become transparent to the
- // transparent region. Exclude anything within the new desktop shape.
- transparent_region_.AddRegion(desktop_shape_);
- transparent_region_.AddRegion(updated_region_);
- transparent_region_.Subtract(*new_desktop_shape);
-
- // Add newly exposed areas to the update region and limit updates to the new
- // desktop shape.
- webrtc::DesktopRegion difference = *new_desktop_shape;
- difference.Subtract(desktop_shape_);
- updated_region_.AddRegion(difference);
- updated_region_.IntersectWith(*new_desktop_shape);
-
- // Set the new desktop shape region.
- desktop_shape_.Swap(new_desktop_shape);
+webrtc::DesktopSize VideoDecoderVpx::image_size() const {
+ return image_ ? webrtc::DesktopSize(image_->d_w, image_->d_h)
+ : webrtc::DesktopSize();
}
} // namespace remoting
diff --git a/remoting/codec/video_decoder_vpx.h b/remoting/codec/video_decoder_vpx.h
index 9c96998..21a576a 100644
--- a/remoting/codec/video_decoder_vpx.h
+++ b/remoting/codec/video_decoder_vpx.h
@@ -12,6 +12,7 @@
#include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h"
#include "third_party/webrtc/modules/desktop_capture/desktop_region.h"
+typedef const struct vpx_codec_iface vpx_codec_iface_t;
typedef struct vpx_image vpx_image_t;
namespace remoting {
@@ -25,7 +26,7 @@ class VideoDecoderVpx : public VideoDecoder {
~VideoDecoderVpx() override;
// VideoDecoder interface.
- void Initialize(const webrtc::DesktopSize& screen_size) override;
+ void Initialize(const webrtc::DesktopSize& source_size) override;
bool DecodePacket(const VideoPacket& packet) override;
void Invalidate(const webrtc::DesktopSize& view_size,
const webrtc::DesktopRegion& region) override;
@@ -37,29 +38,21 @@ class VideoDecoderVpx : public VideoDecoder {
const webrtc::DesktopRegion* GetImageShape() override;
private:
- explicit VideoDecoderVpx(ScopedVpxCodec codec);
+ explicit VideoDecoderVpx(vpx_codec_iface_t* codec);
- // Calculates the difference between the desktop shape regions in two
- // consecutive frames and updates |updated_region_| and |transparent_region_|
- // accordingly.
- void UpdateImageShapeRegion(webrtc::DesktopRegion* new_desktop_shape);
+ // Returns the dimensions of the most recent frame as a DesktopSize.
+ webrtc::DesktopSize image_size() const;
ScopedVpxCodec codec_;
- // Pointer to the last decoded image.
- vpx_image_t* last_image_;
+ // Pointer to the most recently decoded image.
+ vpx_image_t* image_;
- // The region updated that hasn't been copied to the screen yet.
+ // Area of the source that has changed since the last RenderFrame call.
webrtc::DesktopRegion updated_region_;
- // Output dimensions.
- webrtc::DesktopSize screen_size_;
-
- // The region occupied by the top level windows.
- webrtc::DesktopRegion desktop_shape_;
-
- // The region that should be make transparent.
- webrtc::DesktopRegion transparent_region_;
+ // The shape of the most-recent frame, if any.
+ scoped_ptr<webrtc::DesktopRegion> desktop_shape_;
DISALLOW_COPY_AND_ASSIGN(VideoDecoderVpx);
};