summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorsergeyu@chromium.org <sergeyu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-11-09 23:22:20 +0000
committersergeyu@chromium.org <sergeyu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-11-09 23:22:20 +0000
commit3adf1b2a65a85ff7d4b55cd57a4e400f104d27dd (patch)
tree235aee1f595583297e057b058a29d2ed24d9da92
parent9db9173baebf27623ce30770696f84a3fec74259 (diff)
downloadchromium_src-3adf1b2a65a85ff7d4b55cd57a4e400f104d27dd.zip
chromium_src-3adf1b2a65a85ff7d4b55cd57a4e400f104d27dd.tar.gz
chromium_src-3adf1b2a65a85ff7d4b55cd57a4e400f104d27dd.tar.bz2
Add VideoPacket struct for video packets. Refactor Decode interface to use it.
Various cleanups. BUG=None TEST=Unittests. Review URL: http://codereview.chromium.org/4476003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@65590 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--remoting/base/capture_data.cc2
-rw-r--r--remoting/base/capture_data.h8
-rw-r--r--remoting/base/decoder.h41
-rw-r--r--remoting/base/decoder_row_based.cc103
-rw-r--r--remoting/base/decoder_row_based.h24
-rw-r--r--remoting/base/decoder_vp8.cc19
-rw-r--r--remoting/base/decoder_vp8.h14
-rw-r--r--remoting/base/encoder.h3
-rw-r--r--remoting/base/encoder_verbatim.cc6
-rw-r--r--remoting/base/encoder_vp8.cc9
-rw-r--r--remoting/base/encoder_zlib.cc5
-rw-r--r--remoting/base/util.cc12
-rw-r--r--remoting/base/util.h5
-rw-r--r--remoting/client/chromoting_client.cc54
-rw-r--r--remoting/client/chromoting_client.h10
-rw-r--r--remoting/client/rectangle_update_decoder.cc148
-rw-r--r--remoting/client/rectangle_update_decoder.h21
-rw-r--r--remoting/host/capturer.cc17
-rw-r--r--remoting/host/capturer.h8
-rw-r--r--remoting/host/capturer_fake.cc2
-rw-r--r--remoting/host/capturer_fake_ascii.cc2
-rw-r--r--remoting/host/capturer_gdi.cc1
-rw-r--r--remoting/host/capturer_linux.cc2
-rw-r--r--remoting/host/capturer_mac.cc7
-rw-r--r--remoting/host/session_manager.cc24
-rw-r--r--remoting/host/session_manager_unittest.cc4
-rw-r--r--remoting/proto/internal.proto9
-rw-r--r--remoting/proto/video.proto20
-rw-r--r--remoting/protocol/connection_to_host.h8
-rw-r--r--remoting/protocol/jingle_connection_to_host.cc12
-rw-r--r--remoting/protocol/jingle_connection_to_host.h5
-rw-r--r--remoting/protocol/message_decoder_unittest.cc39
-rw-r--r--remoting/protocol/protobuf_video_reader.cc11
-rw-r--r--remoting/protocol/protobuf_video_reader.h5
-rw-r--r--remoting/protocol/protobuf_video_writer.cc1
-rw-r--r--remoting/protocol/rtp_video_reader.cc11
-rw-r--r--remoting/protocol/rtp_video_writer.cc6
-rw-r--r--remoting/protocol/stream_writer.cc2
-rw-r--r--remoting/protocol/stream_writer.h2
-rw-r--r--remoting/protocol/video_reader.cc5
-rw-r--r--remoting/protocol/video_stub.h5
-rw-r--r--remoting/protocol/video_writer.h3
42 files changed, 332 insertions, 363 deletions
diff --git a/remoting/base/capture_data.cc b/remoting/base/capture_data.cc
index 2d96441..8bf464d 100644
--- a/remoting/base/capture_data.cc
+++ b/remoting/base/capture_data.cc
@@ -16,7 +16,7 @@ DataPlanes::DataPlanes() {
CaptureData::CaptureData(const DataPlanes &data_planes,
int width,
int height,
- PixelFormat format) :
+ media::VideoFrame::Format format) :
data_planes_(data_planes), dirty_rects_(),
width_(width), height_(height), pixel_format_(format) {
}
diff --git a/remoting/base/capture_data.h b/remoting/base/capture_data.h
index 97631ab..f399e70 100644
--- a/remoting/base/capture_data.h
+++ b/remoting/base/capture_data.h
@@ -9,8 +9,8 @@
#include "base/basictypes.h"
#include "base/ref_counted.h"
+#include "media/base/video_frame.h"
#include "remoting/base/types.h"
-#include "remoting/proto/video.pb.h"
namespace remoting {
@@ -29,7 +29,7 @@ class CaptureData : public base::RefCountedThreadSafe<CaptureData> {
CaptureData(const DataPlanes &data_planes,
int width,
int height,
- PixelFormat format);
+ media::VideoFrame::Format format);
// Get the data_planes data of the last capture.
const DataPlanes& data_planes() const { return data_planes_; }
@@ -45,7 +45,7 @@ class CaptureData : public base::RefCountedThreadSafe<CaptureData> {
int height() const { return height_; }
// Get the pixel format of the image captured.
- PixelFormat pixel_format() const { return pixel_format_; }
+ media::VideoFrame::Format pixel_format() const { return pixel_format_; }
// Mutating methods.
InvalidRects& mutable_dirty_rects() { return dirty_rects_; }
@@ -55,7 +55,7 @@ class CaptureData : public base::RefCountedThreadSafe<CaptureData> {
InvalidRects dirty_rects_;
int width_;
int height_;
- PixelFormat pixel_format_;
+ media::VideoFrame::Format pixel_format_;
friend class base::RefCountedThreadSafe<CaptureData>;
virtual ~CaptureData();
diff --git a/remoting/base/decoder.h b/remoting/base/decoder.h
index 82211a7..bcb5a54 100644
--- a/remoting/base/decoder.h
+++ b/remoting/base/decoder.h
@@ -5,6 +5,8 @@
#ifndef REMOTING_BASE_DECODER_H_
#define REMOTING_BASE_DECODER_H_
+#include <vector>
+
#include "base/task.h"
#include "base/scoped_ptr.h"
#include "gfx/rect.h"
@@ -21,34 +23,39 @@ typedef std::vector<gfx::Rect> UpdatedRects;
// TODO(ajwong): Beef up this documentation once the API stabilizes.
class Decoder {
public:
+ // DecodeResult is returned from DecodePacket() and indicates current state
+ // of the decoder. DECODE_DONE means that last packet for the frame was
+ // processed, and the frame can be displayed now. DECODE_IN_PROGRESS
+ // indicates that the decoder must receive more data before the frame can be
+ // displayed. DECODE_ERROR is returned if there was an error in the stream.
+ enum DecodeResult {
+ DECODE_ERROR = -1,
+ DECODE_IN_PROGRESS,
+ DECODE_DONE,
+ };
+
Decoder() {}
virtual ~Decoder() {}
- // TODO(ajwong): This API is incorrect in the face of a streaming decode
- // protocol like VP8. However, it breaks the layering abstraction by
- // depending on the network packet protocol buffer type. I'm going to go
- // forward with it as is, and then refactor again to support streaming
- // decodes.
-
// Initializes the decoder to draw into the given |frame|. The |clip|
// specifies the region to draw into. The clip region must fit inside
- // the dimensions of frame. Failure to do so will CHECK Fail.
- //
- // TODO(ajwong): Should this take the source pixel format?
- // TODO(ajwong): Should the protocol be split into basic-types followed
- // by packet types? Basic types might include the format enum below.
- virtual void Initialize(scoped_refptr<media::VideoFrame> frame,
- const gfx::Rect& clip, int bytes_per_src_pixel) = 0;
+ // the dimensions of frame. Failure to do so will CHECK fail.
+ virtual void Initialize(scoped_refptr<media::VideoFrame> frame) = 0;
+
+ // Feeds more data into the decoder.
+ virtual DecodeResult DecodePacket(const VideoPacket* packet) = 0;
+
+ // Returns rects that were updated in the last frame. Can be called only
+ // after DecodePacket returned DECODE_DONE. Caller keeps ownership of
+ // |rects|. |rects| is kept empty if whole screen needs to be updated.
+ virtual void GetUpdatedRects(UpdatedRects* rects) = 0;
// Reset the decoder to an uninitialized state. Release all references to
// the initialized |frame|. Initialize() must be called before the decoder
// is used again.
virtual void Reset() = 0;
- // Feeds more data into the decoder.
- virtual void DecodeBytes(const std::string& encoded_bytes) = 0;
-
- // Returns true if decoder is ready to accept data via ProcessRectangleData.
+ // Returns true if decoder is ready to accept data via DecodePacket.
virtual bool IsReadyForData() = 0;
virtual VideoPacketFormat::Encoding Encoding() = 0;
diff --git a/remoting/base/decoder_row_based.cc b/remoting/base/decoder_row_based.cc
index d378e96..1a0efad 100644
--- a/remoting/base/decoder_row_based.cc
+++ b/remoting/base/decoder_row_based.cc
@@ -11,6 +11,11 @@
namespace remoting {
+namespace {
+// Both input and output data are assumed to be RGBA32.
+const int kBytesPerPixel = 4;
+}
+
DecoderRowBased* DecoderRowBased::CreateZlibDecoder() {
return new DecoderRowBased(new DecompressorZlib(),
VideoPacketFormat::ENCODING_ZLIB);
@@ -26,7 +31,6 @@ DecoderRowBased::DecoderRowBased(Decompressor* decompressor,
: state_(kUninitialized),
decompressor_(decompressor),
encoding_(encoding),
- bytes_per_src_pixel_(0),
row_pos_(0),
row_y_(0),
// TODO(hclam): We should use the information from the update stream
@@ -45,38 +49,35 @@ void DecoderRowBased::Reset() {
}
bool DecoderRowBased::IsReadyForData() {
- return state_ == kReady;
+ return state_ == kReady || state_ == kProcessing || state_ == kDone;
}
-void DecoderRowBased::Initialize(scoped_refptr<media::VideoFrame> frame,
- const gfx::Rect& clip,
- int bytes_per_src_pixel) {
+void DecoderRowBased::Initialize(scoped_refptr<media::VideoFrame> frame) {
// Make sure we are not currently initialized.
CHECK_EQ(kUninitialized, state_);
- if (static_cast<PixelFormat>(frame->format()) != PIXEL_FORMAT_RGB32) {
+ if (frame->format() != media::VideoFrame::RGB32) {
LOG(WARNING) << "DecoderRowBased only supports RGB32.";
state_ = kError;
return;
}
frame_ = frame;
-
- // Reset the buffer location status variables.
- clip_ = clip;
- row_pos_ = 0;
- row_y_ = 0;
- bytes_per_src_pixel_ = bytes_per_src_pixel;
-
state_ = kReady;
}
-void DecoderRowBased::DecodeBytes(const std::string& encoded_bytes) {
- DCHECK_EQ(kReady, state_);
+Decoder::DecodeResult DecoderRowBased::DecodePacket(
+ const VideoPacket* packet) {
+ UpdateStateForPacket(packet);
- const uint8* in = reinterpret_cast<const uint8*>(encoded_bytes.data());
- const int in_size = encoded_bytes.size();
- const int row_size = clip_.width() * bytes_per_src_pixel_;
+ if (state_ == kError) {
+ return DECODE_ERROR;
+ }
+
+ const uint8* in = reinterpret_cast<const uint8*>(packet->data().data());
+ const int in_size = packet->data().size();
+
+ const int row_size = clip_.width() * kBytesPerPixel;
int stride = frame_->stride(media::VideoFrame::kRGBPlane);
uint8* rect_begin = frame_->data(media::VideoFrame::kRGBPlane);
@@ -88,15 +89,19 @@ void DecoderRowBased::DecodeBytes(const std::string& encoded_bytes) {
stride = -stride;
}
- // TODO(ajwong): This should be bytes_per_dst_pixel shouldn't this.?
- uint8* out = rect_begin +
- stride * (clip_.y() + row_y_) +
- bytes_per_src_pixel_ * clip_.x();
+ uint8* out = rect_begin + stride * (clip_.y() + row_y_) +
+ kBytesPerPixel * clip_.x();
// Consume all the data in the message.
bool decompress_again = true;
int used = 0;
while (decompress_again && used < in_size) {
+ if (row_y_ >= clip_.height()) {
+ state_ = kError;
+ LOG(WARNING) << "Too much data is received for the given rectangle.";
+ return DECODE_ERROR;
+ }
+
int written = 0;
int consumed = 0;
// TODO(ajwong): This assume source and dest stride are the same, which is
@@ -114,6 +119,60 @@ void DecoderRowBased::DecodeBytes(const std::string& encoded_bytes) {
out += stride;
}
}
+
+ if (state_ == kDone && row_y_ < clip_.height()) {
+ state_ = kError;
+ LOG(WARNING) << "Received LAST_PACKET, but didn't get enough data.";
+ return DECODE_ERROR;
+ }
+
+ return state_ == kDone ? DECODE_DONE : DECODE_IN_PROGRESS;
+}
+
+void DecoderRowBased::UpdateStateForPacket(const VideoPacket* packet) {
+ if (state_ == kError) {
+ return;
+ }
+
+ if (packet->flags() & VideoPacket::FIRST_PACKET) {
+ if (state_ != kReady && state_ != kDone) {
+ state_ = kError;
+ LOG(WARNING) << "Received unexpected FIRST_PACKET.";
+ return;
+ }
+ state_ = kProcessing;
+
+ // Reset the buffer location status variables on the first packet.
+ clip_.SetRect(packet->format().x(), packet->format().y(),
+ packet->format().width(), packet->format().height());
+ row_pos_ = 0;
+ row_y_ = 0;
+ }
+
+ if (state_ != kProcessing) {
+ state_ = kError;
+ LOG(WARNING) << "Received unexpected packet.";
+ return;
+ }
+
+ if (packet->flags() & VideoPacket::LAST_PACKET) {
+ if (state_ != kProcessing) {
+ state_ = kError;
+ LOG(WARNING) << "Received unexpected LAST_PACKET.";
+ return;
+ }
+ state_ = kDone;
+ }
+
+ return;
+}
+
+void DecoderRowBased::GetUpdatedRects(UpdatedRects* rects) {
+ rects->push_back(clip_);
+}
+
+VideoPacketFormat::Encoding DecoderRowBased::Encoding() {
+ return encoding_;
}
} // namespace remoting
diff --git a/remoting/base/decoder_row_based.h b/remoting/base/decoder_row_based.h
index 2deb897..da05c05 100644
--- a/remoting/base/decoder_row_based.h
+++ b/remoting/base/decoder_row_based.h
@@ -19,12 +19,12 @@ class DecoderRowBased : public Decoder {
static DecoderRowBased* CreateVerbatimDecoder();
// Decoder implementation.
- virtual void Reset();
virtual bool IsReadyForData();
- virtual void Initialize(scoped_refptr<media::VideoFrame> frame,
- const gfx::Rect& clip, int bytes_per_src_pixel);
- virtual void DecodeBytes(const std::string& encoded_bytes);
- virtual VideoPacketFormat::Encoding Encoding() { return encoding_; }
+ virtual void Initialize(scoped_refptr<media::VideoFrame> frame);
+ virtual DecodeResult DecodePacket(const VideoPacket* packet);
+ virtual void GetUpdatedRects(UpdatedRects* rects);
+ virtual void Reset();
+ virtual VideoPacketFormat::Encoding Encoding();
// TODO(hclam): Should make this into the Decoder interface.
// TODO(ajwong): Before putting into the interface, we should decide if the
@@ -32,15 +32,20 @@ class DecoderRowBased : public Decoder {
void set_reverse_rows(bool reverse) { reverse_rows_ = reverse; }
private:
- DecoderRowBased(Decompressor* decompressor,
- VideoPacketFormat::Encoding encoding);
-
enum State {
kUninitialized,
kReady,
+ kProcessing,
+ kDone,
kError,
};
+ DecoderRowBased(Decompressor* decompressor,
+ VideoPacketFormat::Encoding encoding);
+
+  // Helper method. Called from DecodePacket to update the state of the decoder.
+ void UpdateStateForPacket(const VideoPacket* packet);
+
// The internal state of the decoder.
State state_;
@@ -56,9 +61,6 @@ class DecoderRowBased : public Decoder {
// The encoding of the incoming stream.
VideoPacketFormat::Encoding encoding_;
- // Number of bytes per pixel from source stream.
- int bytes_per_src_pixel_;
-
// The position in the row that we are updating.
int row_pos_;
diff --git a/remoting/base/decoder_vp8.cc b/remoting/base/decoder_vp8.cc
index 46a6d31..516e9f4 100644
--- a/remoting/base/decoder_vp8.cc
+++ b/remoting/base/decoder_vp8.cc
@@ -30,8 +30,7 @@ DecoderVp8::~DecoderVp8() {
delete codec_;
}
-void DecoderVp8::Initialize(scoped_refptr<media::VideoFrame> frame,
- const gfx::Rect& clip, int bytes_per_src_pixel) {
+void DecoderVp8::Initialize(scoped_refptr<media::VideoFrame> frame) {
DCHECK_EQ(kUninitialized, state_);
if (frame->format() != media::VideoFrame::RGB32) {
@@ -44,7 +43,7 @@ void DecoderVp8::Initialize(scoped_refptr<media::VideoFrame> frame,
state_ = kReady;
}
-void DecoderVp8::DecodeBytes(const std::string& encoded_bytes) {
+Decoder::DecodeResult DecoderVp8::DecodePacket(const VideoPacket* packet) {
DCHECK_EQ(kReady, state_);
// Initialize the codec as needed.
@@ -59,19 +58,19 @@ void DecoderVp8::DecodeBytes(const std::string& encoded_bytes) {
delete codec_;
codec_ = NULL;
state_ = kError;
- return;
+ return DECODE_ERROR;
}
}
// Do the actual decoding.
vpx_codec_err_t ret = vpx_codec_decode(
- codec_, reinterpret_cast<const uint8*>(encoded_bytes.data()),
- encoded_bytes.size(), NULL, 0);
+ codec_, reinterpret_cast<const uint8*>(packet->data().data()),
+ packet->data().size(), NULL, 0);
if (ret != VPX_CODEC_OK) {
LOG(INFO) << "Decoding failed:" << vpx_codec_err_to_string(ret) << "\n"
<< "Details: " << vpx_codec_error(codec_) << "\n"
<< vpx_codec_error_detail(codec_);
- return;
+ return DECODE_ERROR;
}
// Gets the decoded data.
@@ -79,7 +78,7 @@ void DecoderVp8::DecodeBytes(const std::string& encoded_bytes) {
vpx_image_t* image = vpx_codec_get_frame(codec_, &iter);
if (!image) {
LOG(INFO) << "No video frame decoded";
- return;
+ return DECODE_ERROR;
}
// Perform YUV conversion.
@@ -89,6 +88,10 @@ void DecoderVp8::DecodeBytes(const std::string& encoded_bytes) {
image->stride[0], image->stride[1],
frame_->stride(media::VideoFrame::kRGBPlane),
media::YV12);
+ return DECODE_DONE;
+}
+
+void DecoderVp8::GetUpdatedRects(UpdatedRects* rects) {
}
void DecoderVp8::Reset() {
diff --git a/remoting/base/decoder_vp8.h b/remoting/base/decoder_vp8.h
index dfef0b7..ebe082a 100644
--- a/remoting/base/decoder_vp8.h
+++ b/remoting/base/decoder_vp8.h
@@ -17,17 +17,11 @@ class DecoderVp8 : public Decoder {
virtual ~DecoderVp8();
// Decoder implementations.
- virtual void Initialize(scoped_refptr<media::VideoFrame> frame,
- const gfx::Rect& clip, int bytes_per_src_pixel);
-
- virtual void Reset();
-
- // Feeds more data into the decoder.
- virtual void DecodeBytes(const std::string& encoded_bytes);
-
- // Returns true if decoder is ready to accept data via ProcessRectangleData.
+ virtual void Initialize(scoped_refptr<media::VideoFrame> frame);
+ virtual DecodeResult DecodePacket(const VideoPacket* packet);
+ virtual void GetUpdatedRects(UpdatedRects* rects);
virtual bool IsReadyForData();
-
+ virtual void Reset();
virtual VideoPacketFormat::Encoding Encoding();
private:
diff --git a/remoting/base/encoder.h b/remoting/base/encoder.h
index 05e40ff..e34e155 100644
--- a/remoting/base/encoder.h
+++ b/remoting/base/encoder.h
@@ -8,8 +8,6 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "media/base/data_buffer.h"
-// TODO(hclam): Should not depend on internal.pb.h.
-#include "remoting/proto/internal.pb.h"
namespace media {
class DataBuffer;
@@ -18,6 +16,7 @@ namespace media {
namespace remoting {
class CaptureData;
+class VideoPacket;
// A class to perform the task of encoding a continuous stream of
// images.
diff --git a/remoting/base/encoder_verbatim.cc b/remoting/base/encoder_verbatim.cc
index 0bbe4e8..1185bff 100644
--- a/remoting/base/encoder_verbatim.cc
+++ b/remoting/base/encoder_verbatim.cc
@@ -6,9 +6,10 @@
#include "base/logging.h"
#include "gfx/rect.h"
-#include "media/base/data_buffer.h"
+#include "net/base/io_buffer.h"
#include "remoting/base/capture_data.h"
#include "remoting/base/util.h"
+#include "remoting/proto/video.pb.h"
namespace remoting {
@@ -17,8 +18,6 @@ namespace remoting {
// Add support for splitting across packets and remove the 10*.
static const int kPacketSize = 10 * 1024 * 1024;
-using media::DataBuffer;
-
EncoderVerbatim::EncoderVerbatim()
: packet_size_(kPacketSize) {
}
@@ -87,7 +86,6 @@ void EncoderVerbatim::PrepareUpdateStart(const gfx::Rect& rect,
format->set_width(rect.width());
format->set_height(rect.height());
format->set_encoding(VideoPacketFormat::ENCODING_VERBATIM);
- format->set_pixel_format(capture_data_->pixel_format());
}
uint8* EncoderVerbatim::GetOutputBuffer(VideoPacket* packet, size_t size) {
diff --git a/remoting/base/encoder_vp8.cc b/remoting/base/encoder_vp8.cc
index 3a0f75b..dd85fb2 100644
--- a/remoting/base/encoder_vp8.cc
+++ b/remoting/base/encoder_vp8.cc
@@ -4,10 +4,10 @@
#include "base/logging.h"
#include "media/base/callback.h"
-#include "media/base/data_buffer.h"
#include "media/base/media.h"
#include "remoting/base/capture_data.h"
#include "remoting/base/encoder_vp8.h"
+#include "remoting/proto/video.pb.h"
extern "C" {
#define VPX_CODEC_DISABLE_COMPAT 1
@@ -114,7 +114,7 @@ bool EncoderVp8::PrepareImage(scoped_refptr<CaptureData> capture_data) {
// give a gray scale image after conversion.
// TODO(sergeyu): Move this code to a separate routine.
// TODO(sergeyu): Optimize this code.
- DCHECK(capture_data->pixel_format() == PIXEL_FORMAT_RGB32)
+ DCHECK(capture_data->pixel_format() == media::VideoFrame::RGB32)
<< "Only RGB32 is supported";
uint8* in = capture_data->data_planes().data[0];
const int in_stride = capture_data->data_planes().strides[0];
@@ -197,11 +197,6 @@ void EncoderVp8::Encode(scoped_refptr<CaptureData> capture_data,
message->mutable_format()->set_encoding(VideoPacketFormat::ENCODING_VP8);
message->set_flags(VideoPacket::FIRST_PACKET | VideoPacket::LAST_PACKET);
- message->mutable_format()->set_pixel_format(PIXEL_FORMAT_RGB32);
- message->mutable_format()->set_x(0);
- message->mutable_format()->set_y(0);
- message->mutable_format()->set_width(capture_data->width());
- message->mutable_format()->set_height(capture_data->height());
data_available_callback->Run(message);
delete data_available_callback;
diff --git a/remoting/base/encoder_zlib.cc b/remoting/base/encoder_zlib.cc
index e184fd3..5fc1afe 100644
--- a/remoting/base/encoder_zlib.cc
+++ b/remoting/base/encoder_zlib.cc
@@ -6,10 +6,10 @@
#include "base/logging.h"
#include "gfx/rect.h"
-#include "media/base/data_buffer.h"
#include "remoting/base/capture_data.h"
#include "remoting/base/compressor_zlib.h"
#include "remoting/base/util.h"
+#include "remoting/proto/video.pb.h"
namespace remoting {
@@ -26,7 +26,7 @@ EncoderZlib::~EncoderZlib() {}
void EncoderZlib::Encode(scoped_refptr<CaptureData> capture_data,
bool key_frame,
DataAvailableCallback* data_available_callback) {
- CHECK(capture_data->pixel_format() == PIXEL_FORMAT_RGB32)
+ CHECK(capture_data->pixel_format() == media::VideoFrame::RGB32)
<< "Zlib Encoder only works with RGB32. Got "
<< capture_data->pixel_format();
capture_data_ = capture_data;
@@ -118,7 +118,6 @@ void EncoderZlib::PrepareUpdateStart(const gfx::Rect& rect,
format->set_width(rect.width());
format->set_height(rect.height());
format->set_encoding(VideoPacketFormat::ENCODING_ZLIB);
- format->set_pixel_format(capture_data_->pixel_format());
}
uint8* EncoderZlib::GetOutputBuffer(VideoPacket* packet, size_t size) {
diff --git a/remoting/base/util.cc b/remoting/base/util.cc
index f24d322..5cf70fe 100644
--- a/remoting/base/util.cc
+++ b/remoting/base/util.cc
@@ -6,17 +6,19 @@
#include "base/logging.h"
+using media::VideoFrame;
+
namespace remoting {
-int GetBytesPerPixel(PixelFormat format) {
+int GetBytesPerPixel(VideoFrame::Format format) {
// Note: The order is important here for performance. This is sorted from the
// most common to the least common (PIXEL_FORMAT_ASCII is mostly used
// just for testing).
switch (format) {
- case PIXEL_FORMAT_RGB24: return 3;
- case PIXEL_FORMAT_RGB565: return 2;
- case PIXEL_FORMAT_RGB32: return 4;
- case PIXEL_FORMAT_ASCII: return 1;
+ case VideoFrame::RGB24: return 3;
+ case VideoFrame::RGB565: return 2;
+ case VideoFrame::RGB32: return 4;
+ case VideoFrame::ASCII: return 1;
default:
NOTREACHED() << "Pixel format not supported";
return 0;
diff --git a/remoting/base/util.h b/remoting/base/util.h
index d7f4128..bf5cff5 100644
--- a/remoting/base/util.h
+++ b/remoting/base/util.h
@@ -5,11 +5,12 @@
#ifndef REMOTING_BASE_UTIL_H_
#define REMOTING_BASE_UTIL_H_
-#include "remoting/proto/video.pb.h"
+#include "media/base/video_frame.h"
namespace remoting {
-int GetBytesPerPixel(PixelFormat format);
+// TODO(sergeyu): Move this to media::VideoFrame.
+int GetBytesPerPixel(media::VideoFrame::Format format);
} // namespace remoting
diff --git a/remoting/client/chromoting_client.cc b/remoting/client/chromoting_client.cc
index 4400d4f..1a0192c 100644
--- a/remoting/client/chromoting_client.cc
+++ b/remoting/client/chromoting_client.cc
@@ -12,6 +12,7 @@
#include "remoting/client/rectangle_update_decoder.h"
#include "remoting/proto/internal.pb.h"
#include "remoting/protocol/connection_to_host.h"
+#include "remoting/protocol/session_config.h"
namespace remoting {
@@ -94,27 +95,6 @@ void ChromotingClient::SetViewport(int x, int y, int width, int height) {
view_->SetViewport(x, y, width, height);
}
-void ChromotingClient::HandleMessage(protocol::ConnectionToHost* conn,
- ChromotingHostMessage* msg) {
- if (message_loop() != MessageLoop::current()) {
- message_loop()->PostTask(
- FROM_HERE,
- NewRunnableMethod(this, &ChromotingClient::HandleMessage,
- conn, msg));
- return;
- }
-
- // TODO(ajwong): Consider creating a macro similar to the IPC message
- // mappings. Also reconsider the lifetime of the message object.
- if (msg->has_init_client()) {
- ScopedTracer tracer("Handle Init Client");
- InitClient(msg->init_client());
- delete msg;
- } else {
- NOTREACHED() << "Unknown message received";
- }
-}
-
void ChromotingClient::ProcessVideoPacket(const VideoPacket* packet,
Task* done) {
if (message_loop() != MessageLoop::current()) {
@@ -144,12 +124,13 @@ void ChromotingClient::DispatchPacket() {
ScopedTracer tracer("Handle video packet");
rectangle_decoder_->DecodePacket(
- *packet, NewTracedMethod(this, &ChromotingClient::OnPacketDone));
+ packet, NewTracedMethod(this, &ChromotingClient::OnPacketDone));
}
void ChromotingClient::OnConnectionOpened(protocol::ConnectionToHost* conn) {
VLOG(1) << "ChromotingClient::OnConnectionOpened";
SetConnectionState(CONNECTED);
+ Initialize();
}
void ChromotingClient::OnConnectionClosed(protocol::ConnectionToHost* conn) {
@@ -201,20 +182,31 @@ void ChromotingClient::OnPacketDone() {
DispatchPacket();
}
-void ChromotingClient::InitClient(const InitClientMessage& init_client) {
- DCHECK_EQ(message_loop(), MessageLoop::current());
- TraceContext::tracer()->PrintString("Init received");
+void ChromotingClient::Initialize() {
+ if (message_loop() != MessageLoop::current()) {
+ message_loop()->PostTask(
+ FROM_HERE,
+ NewTracedMethod(this, &ChromotingClient::Initialize));
+ return;
+ }
+
+ TraceContext::tracer()->PrintString("Initializing client.");
+
+ const protocol::SessionConfig* config = connection_->config();
// Resize the window.
- int width = init_client.width();
- int height = init_client.height();
- VLOG(1) << "Init client received geometry: " << width << "x" << height;
+ int width = config->initial_resolution().width;
+ int height = config->initial_resolution().height;
+ VLOG(1) << "Initial screen geometry: " << width << "x" << height;
-// TODO(ajwong): What to do here? Does the decoder actually need to request
-// the right frame size? This is mainly an optimization right?
-// rectangle_decoder_->SetOutputFrameSize(width, height);
+ // TODO(ajwong): What to do here? Does the decoder actually need to request
+ // the right frame size? This is mainly an optimization right?
+ // rectangle_decoder_->SetOutputFrameSize(width, height);
view_->SetViewport(0, 0, width, height);
+ // Initialize the decoder.
+ rectangle_decoder_->Initialize(config);
+
// Schedule the input handler to process the event queue.
input_handler_->Initialize();
}
diff --git a/remoting/client/chromoting_client.h b/remoting/client/chromoting_client.h
index 9ef2b39..7bbe306 100644
--- a/remoting/client/chromoting_client.h
+++ b/remoting/client/chromoting_client.h
@@ -19,9 +19,7 @@ class MessageLoop;
namespace remoting {
-class ChromotingHostMessage;
class ClientContext;
-class InitClientMessage;
class InputHandler;
class RectangleUpdateDecoder;
@@ -55,8 +53,6 @@ class ChromotingClient : public protocol::ConnectionToHost::HostEventCallback,
virtual void SetViewport(int x, int y, int width, int height);
// ConnectionToHost::HostEventCallback implementation.
- virtual void HandleMessage(protocol::ConnectionToHost* conn,
- ChromotingHostMessage* messages);
virtual void OnConnectionOpened(protocol::ConnectionToHost* conn);
virtual void OnConnectionClosed(protocol::ConnectionToHost* conn);
virtual void OnConnectionFailed(protocol::ConnectionToHost* conn);
@@ -75,6 +71,9 @@ class ChromotingClient : public protocol::ConnectionToHost::HostEventCallback,
MessageLoop* message_loop();
+ // Initializes connection.
+ void Initialize();
+
// Convenience method for modifying the state on this object's message loop.
void SetConnectionState(ConnectionState s);
@@ -84,9 +83,6 @@ class ChromotingClient : public protocol::ConnectionToHost::HostEventCallback,
void OnPacketDone();
- // Handles for chromotocol messages.
- void InitClient(const InitClientMessage& msg);
-
// The following are not owned by this class.
ClientConfig config_;
ClientContext* context_;
diff --git a/remoting/client/rectangle_update_decoder.cc b/remoting/client/rectangle_update_decoder.cc
index 4023b30..2690e14 100644
--- a/remoting/client/rectangle_update_decoder.cc
+++ b/remoting/client/rectangle_update_decoder.cc
@@ -13,8 +13,11 @@
#include "remoting/base/tracer.h"
#include "remoting/base/util.h"
#include "remoting/client/frame_consumer.h"
+#include "remoting/protocol/session_config.h"
using media::AutoTaskRunner;
+using remoting::protocol::ChannelConfig;
+using remoting::protocol::SessionConfig;
namespace remoting {
@@ -47,7 +50,30 @@ RectangleUpdateDecoder::RectangleUpdateDecoder(MessageLoop* message_loop,
RectangleUpdateDecoder::~RectangleUpdateDecoder() {
}
-void RectangleUpdateDecoder::DecodePacket(const VideoPacket& packet,
+void RectangleUpdateDecoder::Initialize(const SessionConfig* config) {
+ screen_size_ = gfx::Size(config->initial_resolution().width,
+ config->initial_resolution().height);
+
+ // Initialize decoder based on the selected codec.
+ ChannelConfig::Codec codec = config->video_config().codec;
+ if (codec == ChannelConfig::CODEC_VERBATIM) {
+ TraceContext::tracer()->PrintString("Creating Verbatim decoder.");
+ decoder_.reset(DecoderRowBased::CreateVerbatimDecoder());
+ } else if (codec == ChannelConfig::CODEC_ZIP) {
+ TraceContext::tracer()->PrintString("Creating Zlib decoder");
+ decoder_.reset(DecoderRowBased::CreateZlibDecoder());
+ // TODO(sergeyu): Enable VP8 on ARM builds.
+#if !defined(ARCH_CPU_ARM_FAMILY)
+ } else if (codec == ChannelConfig::CODEC_VP8) {
+ TraceContext::tracer()->PrintString("Creating VP8 decoder");
+ decoder_.reset(new DecoderVp8());
+#endif
+ } else {
+ NOTREACHED() << "Invalid Encoding found: " << codec;
+ }
+}
+
+void RectangleUpdateDecoder::DecodePacket(const VideoPacket* packet,
Task* done) {
if (message_loop_ != MessageLoop::current()) {
message_loop_->PostTask(
@@ -61,28 +87,18 @@ void RectangleUpdateDecoder::DecodePacket(const VideoPacket& packet,
TraceContext::tracer()->PrintString("Decode Packet called.");
- if (!IsValidPacket(packet)) {
- LOG(ERROR) << "Received invalid packet.";
- return;
- }
-
- Task* process_packet_data =
- NewTracedMethod(this,
- &RectangleUpdateDecoder::ProcessPacketData,
- packet, done_runner.release());
-
- if (packet.flags() | VideoPacket::FIRST_PACKET) {
- const VideoPacketFormat& format = packet.format();
-
- InitializeDecoder(format, process_packet_data);
+ if (!decoder_->IsReadyForData()) {
+ InitializeDecoder(
+ NewTracedMethod(this,
+ &RectangleUpdateDecoder::ProcessPacketData,
+ packet, done_runner.release()));
} else {
- process_packet_data->Run();
- delete process_packet_data;
+ ProcessPacketData(packet, done_runner.release());
}
}
void RectangleUpdateDecoder::ProcessPacketData(
- const VideoPacket& packet, Task* done) {
+ const VideoPacket* packet, Task* done) {
AutoTaskRunner done_runner(done);
if (!decoder_->IsReadyForData()) {
@@ -92,70 +108,31 @@ void RectangleUpdateDecoder::ProcessPacketData(
}
TraceContext::tracer()->PrintString("Executing Decode.");
- decoder_->DecodeBytes(packet.data());
- if (packet.flags() | VideoPacket::LAST_PACKET) {
- decoder_->Reset();
+ Decoder::DecodeResult result = decoder_->DecodePacket(packet);
+ if (result == Decoder::DECODE_DONE) {
UpdatedRects* rects = new UpdatedRects();
-
- // Empty out the list of current updated rects so the decoder can keep
- // writing new ones while these are processed.
- rects->swap(updated_rects_);
-
+ decoder_->GetUpdatedRects(rects);
consumer_->OnPartialFrameOutput(frame_, rects,
new PartialFrameCleanup(frame_, rects));
}
}
-// static
-bool RectangleUpdateDecoder::IsValidPacket(const VideoPacket& packet) {
- if (!packet.IsInitialized()) {
- LOG(WARNING) << "Protobuf consistency checks fail.";
- return false;
- }
-
- // First packet must have a format.
- if (packet.flags() | VideoPacket::FIRST_PACKET) {
- if (!packet.has_format()) {
- LOG(WARNING) << "First packet must have format.";
- return false;
- }
-
- // TODO(ajwong): Verify that we don't need to whitelist encodings.
- const VideoPacketFormat& format = packet.format();
- if (!format.has_encoding() ||
- format.encoding() == VideoPacketFormat::ENCODING_INVALID) {
- LOG(WARNING) << "Invalid encoding specified.";
- return false;
- }
- }
-
- // We shouldn't generate null packets.
- if (!packet.has_data()) {
- LOG(WARNING) << "Packet w/o data received.";
- return false;
- }
-
- return true;
-}
-
-void RectangleUpdateDecoder::InitializeDecoder(const VideoPacketFormat& format,
- Task* done) {
+void RectangleUpdateDecoder::InitializeDecoder(Task* done) {
if (message_loop_ != MessageLoop::current()) {
message_loop_->PostTask(
FROM_HERE,
NewTracedMethod(this,
- &RectangleUpdateDecoder::InitializeDecoder,
- format, done));
+ &RectangleUpdateDecoder::InitializeDecoder, done));
return;
}
AutoTaskRunner done_runner(done);
// Check if we need to request a new frame.
if (!frame_ ||
- frame_->width() < static_cast<size_t>(format.width()) ||
- frame_->height() < static_cast<size_t>(format.height())) {
+ frame_->width() != static_cast<size_t>(screen_size_.width()) ||
+ frame_->height() != static_cast<size_t>(screen_size_.height())) {
if (frame_) {
TraceContext::tracer()->PrintString("Releasing old frame.");
consumer_->ReleaseFrame(frame_);
@@ -163,13 +140,12 @@ void RectangleUpdateDecoder::InitializeDecoder(const VideoPacketFormat& format,
}
TraceContext::tracer()->PrintString("Requesting new frame.");
consumer_->AllocateFrame(media::VideoFrame::RGB32,
- format.width(), format.height(),
+ screen_size_.width(), screen_size_.height(),
base::TimeDelta(), base::TimeDelta(),
&frame_,
NewTracedMethod(
this,
&RectangleUpdateDecoder::InitializeDecoder,
- format,
done_runner.release()));
return;
}
@@ -178,45 +154,9 @@ void RectangleUpdateDecoder::InitializeDecoder(const VideoPacketFormat& format,
// and properly disable this class.
CHECK(frame_);
- if (decoder_.get()) {
- // TODO(ajwong): We need to handle changing decoders midstream correctly
- // since someone may feed us a corrupt stream, or manually create a
- // stream that changes decoders. At the very leask, we should not
- // crash the process.
- //
- // For now though, assume that only one encoding is used throughout.
- //
- // Note, this may be as simple as just deleting the current decoder.
- // However, we need to verify the flushing semantics of the decoder first.
- CHECK(decoder_->Encoding() == format.encoding());
- } else {
- // Initialize a new decoder based on this message encoding.
- if (format.encoding() == VideoPacketFormat::ENCODING_VERBATIM) {
- TraceContext::tracer()->PrintString("Creating Verbatim decoder.");
- decoder_.reset(DecoderRowBased::CreateVerbatimDecoder());
- } else if (format.encoding() == VideoPacketFormat::ENCODING_ZLIB) {
- TraceContext::tracer()->PrintString("Creating Zlib decoder");
- decoder_.reset(DecoderRowBased::CreateZlibDecoder());
-// TODO(sergeyu): Enable VP8 on ARM builds.
-#if !defined(ARCH_CPU_ARM_FAMILY)
- } else if (format.encoding() == VideoPacketFormat::ENCODING_VP8) {
- TraceContext::tracer()->PrintString("Creating VP8 decoder");
- decoder_.reset(new DecoderVp8());
-#endif
- } else {
- NOTREACHED() << "Invalid Encoding found: " << format.encoding();
- }
- }
-
- // TODO(ajwong): This can happen in the face of corrupt input data. Figure
- // out the right behavior and make this more resilient.
- CHECK(updated_rects_.empty());
+ decoder_->Reset();
+ decoder_->Initialize(frame_);
- gfx::Rect rectangle_size(format.x(), format.y(),
- format.width(), format.height());
- updated_rects_.push_back(rectangle_size);
- decoder_->Initialize(frame_, rectangle_size,
- GetBytesPerPixel(format.pixel_format()));
TraceContext::tracer()->PrintString("Decoder is Initialized");
}
diff --git a/remoting/client/rectangle_update_decoder.h b/remoting/client/rectangle_update_decoder.h
index e6c344e..1eb93b5 100644
--- a/remoting/client/rectangle_update_decoder.h
+++ b/remoting/client/rectangle_update_decoder.h
@@ -7,8 +7,8 @@
#include "base/scoped_ptr.h"
#include "base/task.h"
+#include "gfx/size.h"
#include "media/base/video_frame.h"
-#include "remoting/base/decoder.h" // For UpdatedRects
class MessageLoop;
@@ -19,14 +19,22 @@ class FrameConsumer;
class VideoPacketFormat;
class VideoPacket;
+namespace protocol {
+class SessionConfig;
+} // namespace protocol
+
// TODO(ajwong): Re-examine this API, especially with regards to how error
// conditions on each step are reported. Should they be CHECKs? Logs? Other?
+// TODO(sergeyu): Rename this class.
class RectangleUpdateDecoder {
public:
RectangleUpdateDecoder(MessageLoop* message_loop,
FrameConsumer* consumer);
~RectangleUpdateDecoder();
+ // Initializes decoder with the information from the protocol config.
+ void Initialize(const protocol::SessionConfig* config);
+
// Decodes the contents of |packet| calling OnPartialFrameOutput() in the
// regsitered as data is avaialable. DecodePacket may keep a reference to
// |packet| so the |packet| must remain alive and valid until |done| is
@@ -34,21 +42,20 @@ class RectangleUpdateDecoder {
//
// TODO(ajwong): Should packet be a const pointer to make the lifetime
// more clear?
- void DecodePacket(const VideoPacket& packet, Task* done);
+ void DecodePacket(const VideoPacket* packet, Task* done);
private:
- static bool IsValidPacket(const VideoPacket& packet);
+ void InitializeDecoder(Task* done);
- void InitializeDecoder(const VideoPacketFormat& format, Task* done);
-
- void ProcessPacketData(const VideoPacket& packet, Task* done);
+ void ProcessPacketData(const VideoPacket* packet, Task* done);
// Pointers to infrastructure objects. Not owned.
MessageLoop* message_loop_;
FrameConsumer* consumer_;
+ gfx::Size screen_size_;
+
scoped_ptr<Decoder> decoder_;
- UpdatedRects updated_rects_;
// Framebuffer for the decoder.
scoped_refptr<media::VideoFrame> frame_;
diff --git a/remoting/host/capturer.cc b/remoting/host/capturer.cc
index 9e63041..b66e577 100644
--- a/remoting/host/capturer.cc
+++ b/remoting/host/capturer.cc
@@ -13,7 +13,7 @@ namespace remoting {
Capturer::Capturer()
: width_(0),
height_(0),
- pixel_format_(PIXEL_FORMAT_INVALID),
+ pixel_format_(media::VideoFrame::INVALID),
bytes_per_row_(0),
current_buffer_(0) {
}
@@ -21,6 +21,21 @@ Capturer::Capturer()
Capturer::~Capturer() {
}
+// Return the width of the screen.
+int Capturer::width() const {
+ return width_;
+}
+
+// Return the height of the screen.
+int Capturer::height() const {
+ return height_;
+}
+
+// Return the pixel format of the screen.
+media::VideoFrame::Format Capturer::pixel_format() const {
+ return pixel_format_;
+}
+
void Capturer::ClearInvalidRects() {
AutoLock auto_inval_rects_lock(inval_rects_lock_);
inval_rects_.clear();
diff --git a/remoting/host/capturer.h b/remoting/host/capturer.h
index 8a11ed5..f9e967a 100644
--- a/remoting/host/capturer.h
+++ b/remoting/host/capturer.h
@@ -49,13 +49,13 @@ class Capturer {
virtual void ScreenConfigurationChanged() = 0;
// Return the width of the screen.
- virtual int width() const { return width_; }
+ virtual int width() const;
// Return the height of the screen.
- virtual int height() const { return height_; }
+ virtual int height() const;
// Return the pixel format of the screen.
- virtual PixelFormat pixel_format() const { return pixel_format_; }
+ virtual media::VideoFrame::Format pixel_format() const;
// Clear out the list of invalid rects.
void ClearInvalidRects();
@@ -116,7 +116,7 @@ class Capturer {
int height_;
// Format of pixels returned in buffer.
- PixelFormat pixel_format_;
+ media::VideoFrame::Format pixel_format_;
int bytes_per_row_;
// The current buffer with valid data for reading.
diff --git a/remoting/host/capturer_fake.cc b/remoting/host/capturer_fake.cc
index 072f19a..80f147a 100644
--- a/remoting/host/capturer_fake.cc
+++ b/remoting/host/capturer_fake.cc
@@ -38,7 +38,7 @@ CapturerFake::~CapturerFake() {
void CapturerFake::ScreenConfigurationChanged() {
width_ = kWidth;
height_ = kHeight;
- pixel_format_ = PIXEL_FORMAT_RGB32;
+ pixel_format_ = media::VideoFrame::RGB32;
bytes_per_row_ = width_ * kBytesPerPixel;
// Create memory for the buffers.
diff --git a/remoting/host/capturer_fake_ascii.cc b/remoting/host/capturer_fake_ascii.cc
index 4b259a9..2220cba 100644
--- a/remoting/host/capturer_fake_ascii.cc
+++ b/remoting/host/capturer_fake_ascii.cc
@@ -21,7 +21,7 @@ CapturerFakeAscii::~CapturerFakeAscii() {
void CapturerFakeAscii::ScreenConfigurationChanged() {
width_ = kWidth;
height_ = kHeight;
- pixel_format_ = PIXEL_FORMAT_ASCII;
+ pixel_format_ = media::VideoFrame::ASCII;
bytes_per_row_ = width_ * kBytesPerPixel;
// Create memory for the buffers.
diff --git a/remoting/host/capturer_gdi.cc b/remoting/host/capturer_gdi.cc
index 7742ff9..0a06de4 100644
--- a/remoting/host/capturer_gdi.cc
+++ b/remoting/host/capturer_gdi.cc
@@ -57,7 +57,6 @@ void CapturerGdi::ScreenConfigurationChanged() {
int rounded_width = (width_ + 3) & (~3);
// Dimensions of screen.
- pixel_format_ = PIXEL_FORMAT_RGB32;
bytes_per_row_ = rounded_width * kBytesPerPixel;
// Create a differ for this screen size.
diff --git a/remoting/host/capturer_linux.cc b/remoting/host/capturer_linux.cc
index 99013d0..5b982e1 100644
--- a/remoting/host/capturer_linux.cc
+++ b/remoting/host/capturer_linux.cc
@@ -235,7 +235,7 @@ void CapturerLinuxPimpl::CaptureRects(
scoped_refptr<CaptureData> capture_data(
new CaptureData(planes, capturer_->width(), capturer_->height(),
- PIXEL_FORMAT_RGB32));
+ media::VideoFrame::RGB32));
for (InvalidRects::const_iterator it = rects.begin();
it != rects.end();
diff --git a/remoting/host/capturer_mac.cc b/remoting/host/capturer_mac.cc
index 3eafa34..4595a95 100644
--- a/remoting/host/capturer_mac.cc
+++ b/remoting/host/capturer_mac.cc
@@ -13,9 +13,9 @@ namespace remoting {
CapturerMac::CapturerMac() : cgl_context_(NULL) {
// TODO(dmaclach): move this initialization out into session_manager,
// or at least have session_manager call into here to initialize it.
- CGError err
- = CGRegisterScreenRefreshCallback(CapturerMac::ScreenRefreshCallback,
- this);
+ CGError err =
+ CGRegisterScreenRefreshCallback(CapturerMac::ScreenRefreshCallback,
+ this);
DCHECK_EQ(err, kCGErrorSuccess);
err = CGScreenRegisterMoveCallback(CapturerMac::ScreenUpdateMoveCallback,
this);
@@ -47,7 +47,6 @@ void CapturerMac::ScreenConfigurationChanged() {
width_ = CGDisplayPixelsWide(mainDevice);
height_ = CGDisplayPixelsHigh(mainDevice);
bytes_per_row_ = width_ * sizeof(uint32_t);
- pixel_format_ = PIXEL_FORMAT_RGB32;
size_t buffer_size = height() * bytes_per_row_;
for (int i = 0; i < kNumBuffers; ++i) {
buffers_[i].reset(new uint8[buffer_size]);
diff --git a/remoting/host/session_manager.cc b/remoting/host/session_manager.cc
index b5c3a31..ebd1061 100644
--- a/remoting/host/session_manager.cc
+++ b/remoting/host/session_manager.cc
@@ -12,6 +12,7 @@
#include "remoting/base/capture_data.h"
#include "remoting/base/tracer.h"
#include "remoting/proto/control.pb.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/client_stub.h"
#include "remoting/protocol/connection_to_client.h"
#include "remoting/protocol/message_decoder.h"
@@ -256,15 +257,7 @@ void SessionManager::DoGetInitInfo(
ScopedTracer tracer("init");
- // Sends the init message to the connection.
- network_loop_->PostTask(
- FROM_HERE,
- NewTracedMethod(this, &SessionManager::DoSendInit, connection,
- capturer()->width(), capturer()->height()));
-
- // And then add the connection to the list so it can receive update stream.
- // It is important we do so in such order or the connection will receive
- // update stream before init message.
+ // Add the client to the list so it can receive update stream.
network_loop_->PostTask(
FROM_HERE,
NewTracedMethod(this, &SessionManager::DoAddClient, connection));
@@ -350,19 +343,6 @@ void SessionManager::DoSendVideoPacket(VideoPacket* packet) {
TraceContext::tracer()->PrintString("DoSendUpdate done");
}
-void SessionManager::DoSendInit(scoped_refptr<ConnectionToClient> connection,
- int width, int height) {
- DCHECK_EQ(network_loop_, MessageLoop::current());
-
- // Sends the connection init information.
- protocol::NotifyResolutionRequest* message =
- new protocol::NotifyResolutionRequest();
- message->set_width(width);
- message->set_height(height);
- connection->client_stub()->NotifyResolution(message,
- NewDeleteMessageTask(message));
-}
-
void SessionManager::DoAddClient(scoped_refptr<ConnectionToClient> connection) {
DCHECK_EQ(network_loop_, MessageLoop::current());
diff --git a/remoting/host/session_manager_unittest.cc b/remoting/host/session_manager_unittest.cc
index 2fc59e6..bfe1fa4 100644
--- a/remoting/host/session_manager_unittest.cc
+++ b/remoting/host/session_manager_unittest.cc
@@ -7,6 +7,7 @@
#include "remoting/base/mock_objects.h"
#include "remoting/host/mock_objects.h"
#include "remoting/host/session_manager.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/mock_objects.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -22,7 +23,7 @@ namespace remoting {
static const int kWidth = 640;
static const int kHeight = 480;
-static const PixelFormat kFormat = PIXEL_FORMAT_RGB32;
+static const media::VideoFrame::Format kFormat = media::VideoFrame::RGB32;
static const VideoPacketFormat::Encoding kEncoding =
VideoPacketFormat::ENCODING_VERBATIM;
@@ -91,7 +92,6 @@ TEST_F(SessionManagerTest, DISABLED_OneRecordCycle) {
// Add the mock client connection to the session.
EXPECT_CALL(*capturer_, width()).WillRepeatedly(Return(kWidth));
EXPECT_CALL(*capturer_, height()).WillRepeatedly(Return(kHeight));
- EXPECT_CALL(*connection_, SendInitClientMessage(kWidth, kHeight));
record_->AddConnection(connection_);
// First the capturer is called.
diff --git a/remoting/proto/internal.proto b/remoting/proto/internal.proto
index fbe3181..7a8e908 100644
--- a/remoting/proto/internal.proto
+++ b/remoting/proto/internal.proto
@@ -6,6 +6,7 @@
syntax = "proto2";
+import "control.proto";
import "event.proto";
import "video.proto";
@@ -13,14 +14,6 @@ option optimize_for = LITE_RUNTIME;
package remoting;
-// Defines the message that is sent from the host to the client.
-// Only one of these messages should be present.
-// NEXT ID: 5
-message ChromotingHostMessage {
- optional InitClientMessage init_client= 1;
- optional VideoPacket video_packet = 2;
-}
-
// Defines the message that is sent from the client to the host.
// Only one of the optional messages should be present.
// NEXT ID: 7
diff --git a/remoting/proto/video.proto b/remoting/proto/video.proto
index 963c118..b193461c 100644
--- a/remoting/proto/video.proto
+++ b/remoting/proto/video.proto
@@ -19,23 +19,6 @@ message InitClientMessage {
required int32 height = 2;
}
-// Identifies the pixel format.
-// Note that this list should match exactly the same as
-// media::VideoFrame::Format in media/base/video_frame.h.
-enum PixelFormat {
- PIXEL_FORMAT_INVALID = 0;
- PIXEL_FORMAT_RGB555 = 1;
- PIXEL_FORMAT_RGB565 = 2;
- PIXEL_FORMAT_RGB24 = 3;
- PIXEL_FORMAT_RGB32 = 4;
- PIXEL_FORMAT_RGBA = 5;
- PIXEL_FORMAT_YV12 = 6;
- PIXEL_FORMAT_YV16 = 7;
- PIXEL_FORMAT_NV12 = 8;
- PIXEL_FORMAT_EMPTY = 9;
- PIXEL_FORMAT_ASCII = 10;
-}
-
// TODO(ajwong): Determine if these fields should be optional or required.
message VideoPacketFormat {
// Identifies how the image was encoded.
@@ -56,9 +39,6 @@ message VideoPacketFormat {
// The encoding used for this image update.
optional Encoding encoding = 5 [default = ENCODING_INVALID];
-
- // The pixel format of this image.
- optional PixelFormat pixel_format = 6 [default = PIXEL_FORMAT_RGB24];
}
message VideoPacket {
diff --git a/remoting/protocol/connection_to_host.h b/remoting/protocol/connection_to_host.h
index 39aeecd..e2b0ce2 100644
--- a/remoting/protocol/connection_to_host.h
+++ b/remoting/protocol/connection_to_host.h
@@ -15,6 +15,7 @@
namespace remoting {
namespace protocol {
+class SessionConfig;
class VideoStub;
class ConnectionToHost {
@@ -23,11 +24,6 @@ class ConnectionToHost {
public:
virtual ~HostEventCallback() {}
- // Handles an event received by the protocol::ConnectionToHost. Ownership
- // of the message is passed to the callee.
- virtual void HandleMessage(ConnectionToHost* conn,
- ChromotingHostMessage* message) = 0;
-
// Called when the network connection is opened.
virtual void OnConnectionOpened(ConnectionToHost* conn) = 0;
@@ -48,6 +44,8 @@ class ConnectionToHost {
VideoStub* video_stub) = 0;
virtual void Disconnect() = 0;
+ virtual const SessionConfig* config() = 0;
+
// Send an input event to the host.
virtual void SendEvent(const ChromotingClientMessage& msg) = 0;
diff --git a/remoting/protocol/jingle_connection_to_host.cc b/remoting/protocol/jingle_connection_to_host.cc
index e31d351..065ae55 100644
--- a/remoting/protocol/jingle_connection_to_host.cc
+++ b/remoting/protocol/jingle_connection_to_host.cc
@@ -64,8 +64,10 @@ void JingleConnectionToHost::Disconnect() {
}
}
-void JingleConnectionToHost::OnControlMessage(ChromotingHostMessage* msg) {
- event_callback_->HandleMessage(this, msg);
+void JingleConnectionToHost::OnControlMessage(ControlMessage* msg) {
+ // TODO(sergeyu): Remove this method and pass ClientStub to the control
+ // stream dispatcher.
+ delete msg;
}
void JingleConnectionToHost::InitSession() {
@@ -112,6 +114,10 @@ void JingleConnectionToHost::OnServerClosed() {
}
}
+const SessionConfig* JingleConnectionToHost::config() {
+ return session_->config();
+}
+
void JingleConnectionToHost::SendEvent(const ChromotingClientMessage& msg) {
// This drops the message if we are not connected yet.
event_writer_.SendMessage(msg);
@@ -156,7 +162,7 @@ void JingleConnectionToHost::OnSessionStateChange(
case protocol::Session::CONNECTED:
// Initialize reader and writer.
- control_reader_.Init<ChromotingHostMessage>(
+ control_reader_.Init<ControlMessage>(
session_->control_channel(),
NewCallback(this, &JingleConnectionToHost::OnControlMessage));
event_writer_.Init(session_->event_channel());
diff --git a/remoting/protocol/jingle_connection_to_host.h b/remoting/protocol/jingle_connection_to_host.h
index fe56e95..c25d872 100644
--- a/remoting/protocol/jingle_connection_to_host.h
+++ b/remoting/protocol/jingle_connection_to_host.h
@@ -34,6 +34,7 @@ class MessageLoop;
namespace remoting {
class JingleThread;
+class VideoPacket;
namespace protocol {
@@ -54,6 +55,8 @@ class JingleConnectionToHost : public ConnectionToHost,
VideoStub* video_stub);
virtual void Disconnect();
+ virtual const SessionConfig* config();
+
virtual void SendEvent(const ChromotingClientMessage& msg);
// JingleClient::Callback interface.
@@ -76,7 +79,7 @@ class JingleConnectionToHost : public ConnectionToHost,
void InitSession();
// Callback for |control_reader_|.
- void OnControlMessage(ChromotingHostMessage* msg);
+ void OnControlMessage(ControlMessage* msg);
// Callback for |video_reader_|.
void OnVideoPacket(VideoPacket* packet);
diff --git a/remoting/protocol/message_decoder_unittest.cc b/remoting/protocol/message_decoder_unittest.cc
index 90c6680..c893d37 100644
--- a/remoting/protocol/message_decoder_unittest.cc
+++ b/remoting/protocol/message_decoder_unittest.cc
@@ -15,11 +15,9 @@
namespace remoting {
-static const int kWidth = 640;
-static const int kHeight = 480;
-static const std::string kTestData = "Chromoting rockz";
+static const int kTestKey = 142;
-static void AppendMessage(const ChromotingHostMessage& msg,
+static void AppendMessage(const ChromotingClientMessage& msg,
std::string* buffer) {
// Contains one encoded message.
scoped_refptr<net::IOBufferWithSize> encoded_msg;
@@ -32,17 +30,12 @@ static void PrepareData(uint8** buffer, int* size) {
// Contains all encoded messages.
std::string encoded_data;
- // The first message is InitClient.
- ChromotingHostMessage msg;
- msg.mutable_init_client()->set_width(kWidth);
- msg.mutable_init_client()->set_height(kHeight);
- AppendMessage(msg, &encoded_data);
- msg.Clear();
+ ChromotingClientMessage msg;
// Then append 10 update sequences to the data.
for (int i = 0; i < 10; ++i) {
- msg.mutable_video_packet()->set_sequence_number(0);
- msg.mutable_video_packet()->set_data(kTestData);
+ msg.mutable_key_event()->set_key(kTestKey + i);
+ msg.mutable_key_event()->set_pressed((i % 2) != 0);
AppendMessage(msg, &encoded_data);
msg.Clear();
}
@@ -67,7 +60,7 @@ void SimulateReadSequence(const int read_sequence[], int sequence_size) {
// Then feed the protocol decoder using the above generated data and the
// read pattern.
- std::list<ChromotingHostMessage*> message_list;
+ std::list<ChromotingClientMessage*> message_list;
for (int i = 0; i < size;) {
// First generate the amount to feed the decoder.
int read = std::min(size - i, read_sequence[i % sequence_size]);
@@ -80,19 +73,21 @@ void SimulateReadSequence(const int read_sequence[], int sequence_size) {
}
// Then verify the decoded messages.
- EXPECT_EQ(11u, message_list.size());
- EXPECT_TRUE(message_list.front()->has_init_client());
- delete message_list.front();
- message_list.pop_front();
+ EXPECT_EQ(10u, message_list.size());
- for (std::list<ChromotingHostMessage*>::iterator it =
+ int index = 0;
+ for (std::list<ChromotingClientMessage*>::iterator it =
message_list.begin();
it != message_list.end(); ++it) {
- ChromotingHostMessage* message = *it;
+ ChromotingClientMessage* message = *it;
// Partial update stream.
- EXPECT_TRUE(message->has_video_packet());
- EXPECT_EQ(kTestData,
- message->video_packet().data().data());
+ EXPECT_TRUE(message->has_key_event());
+
+ // TODO(sergeyu): Don't use index here. Instead store the expected values
+ // in an array.
+ EXPECT_EQ(kTestKey + index, message->key_event().key());
+ EXPECT_EQ((index % 2) != 0, message->key_event().pressed());
+ ++index;
}
STLDeleteElements(&message_list);
}
diff --git a/remoting/protocol/protobuf_video_reader.cc b/remoting/protocol/protobuf_video_reader.cc
index f777363..31f4aae 100644
--- a/remoting/protocol/protobuf_video_reader.cc
+++ b/remoting/protocol/protobuf_video_reader.cc
@@ -5,18 +5,23 @@
#include "remoting/protocol/protobuf_video_reader.h"
#include "base/task.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/session.h"
namespace remoting {
namespace protocol {
-ProtobufVideoReader::ProtobufVideoReader() { }
+ProtobufVideoReader::ProtobufVideoReader(VideoPacketFormat::Encoding encoding)
+ : encoding_(encoding) {
+}
+
ProtobufVideoReader::~ProtobufVideoReader() { }
void ProtobufVideoReader::Init(protocol::Session* session,
VideoStub* video_stub) {
- reader_.Init<VideoPacket>(session->video_channel(),
- NewCallback(this, &ProtobufVideoReader::OnNewData));
+ reader_.Init<VideoPacket>(
+ session->video_channel(),
+ NewCallback(this, &ProtobufVideoReader::OnNewData));
video_stub_ = video_stub;
}
diff --git a/remoting/protocol/protobuf_video_reader.h b/remoting/protocol/protobuf_video_reader.h
index 669cd0b..42e1c97 100644
--- a/remoting/protocol/protobuf_video_reader.h
+++ b/remoting/protocol/protobuf_video_reader.h
@@ -5,6 +5,7 @@
#ifndef REMOTING_PROTOCOL_PROTOBUF_VIDEO_READER_H_
#define REMOTING_PROTOCOL_PROTOBUF_VIDEO_READER_H_
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/message_reader.h"
#include "remoting/protocol/video_reader.h"
@@ -15,7 +16,7 @@ class Session;
class ProtobufVideoReader : public VideoReader {
public:
- ProtobufVideoReader();
+ ProtobufVideoReader(VideoPacketFormat::Encoding encoding);
virtual ~ProtobufVideoReader();
// VideoReader interface.
@@ -25,6 +26,8 @@ class ProtobufVideoReader : public VideoReader {
private:
void OnNewData(VideoPacket* packet);
+ VideoPacketFormat::Encoding encoding_;
+
MessageReader reader_;
// The stub that processes all received packets.
diff --git a/remoting/protocol/protobuf_video_writer.cc b/remoting/protocol/protobuf_video_writer.cc
index 9bbd88b..fdd3b9e 100644
--- a/remoting/protocol/protobuf_video_writer.cc
+++ b/remoting/protocol/protobuf_video_writer.cc
@@ -4,6 +4,7 @@
#include "remoting/protocol/protobuf_video_writer.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/rtp_writer.h"
#include "remoting/protocol/session.h"
#include "remoting/protocol/util.h"
diff --git a/remoting/protocol/rtp_video_reader.cc b/remoting/protocol/rtp_video_reader.cc
index 8660371..dbc2555 100644
--- a/remoting/protocol/rtp_video_reader.cc
+++ b/remoting/protocol/rtp_video_reader.cc
@@ -5,6 +5,7 @@
#include "remoting/protocol/rtp_video_reader.h"
#include "base/task.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/session.h"
namespace remoting {
@@ -13,8 +14,7 @@ namespace protocol {
RtpVideoReader::RtpVideoReader() { }
RtpVideoReader::~RtpVideoReader() { }
-void RtpVideoReader::Init(protocol::Session* session,
- VideoStub* video_stub) {
+void RtpVideoReader::Init(protocol::Session* session, VideoStub* video_stub) {
rtp_reader_.Init(session->video_rtp_channel(),
NewCallback(this, &RtpVideoReader::OnRtpPacket));
video_stub_ = video_stub;
@@ -26,15 +26,10 @@ void RtpVideoReader::Close() {
void RtpVideoReader::OnRtpPacket(const RtpPacket& rtp_packet) {
VideoPacket* packet = new VideoPacket();
- packet->set_data(rtp_packet.payload, rtp_packet.payload_size);
+ packet->set_data(rtp_packet.payload, rtp_packet.payload_size);
packet->mutable_format()->set_encoding(VideoPacketFormat::ENCODING_VP8);
packet->set_flags(rtp_packet.header.marker ? VideoPacket::LAST_PACKET : 0);
- packet->mutable_format()->set_pixel_format(PIXEL_FORMAT_RGB32);
- packet->mutable_format()->set_x(0);
- packet->mutable_format()->set_y(0);
- packet->mutable_format()->set_width(800);
- packet->mutable_format()->set_height(600);
video_stub_->ProcessVideoPacket(packet, new DeleteTask<VideoPacket>(packet));
}
diff --git a/remoting/protocol/rtp_video_writer.cc b/remoting/protocol/rtp_video_writer.cc
index 5bd1c80..1a7ead4 100644
--- a/remoting/protocol/rtp_video_writer.cc
+++ b/remoting/protocol/rtp_video_writer.cc
@@ -4,6 +4,7 @@
#include "remoting/protocol/rtp_video_writer.h"
+#include "remoting/proto/video.pb.h"
#include "remoting/protocol/session.h"
#include "remoting/protocol/rtp_writer.h"
@@ -15,12 +16,11 @@ RtpVideoWriter::RtpVideoWriter() { }
RtpVideoWriter::~RtpVideoWriter() { }
void RtpVideoWriter::Init(protocol::Session* session) {
- rtp_writer_.Init(session->video_rtp_channel(),
- session->video_rtcp_channel());
+ rtp_writer_.Init(session->video_rtp_channel(), session->video_rtcp_channel());
}
void RtpVideoWriter::SendPacket(const VideoPacket& packet) {
- rtp_writer_.SendPacket(packet.data().data(), packet.data().length(),
+ rtp_writer_.SendPacket(packet.data().data(), packet.data().size(),
packet.timestamp());
}
diff --git a/remoting/protocol/stream_writer.cc b/remoting/protocol/stream_writer.cc
index 33a4798..9e1ddd4 100644
--- a/remoting/protocol/stream_writer.cc
+++ b/remoting/protocol/stream_writer.cc
@@ -39,7 +39,7 @@ bool EventStreamWriter::SendMessage(const ChromotingClientMessage& message) {
return buffered_writer_->Write(SerializeAndFrameMessage(message));
}
-bool ControlStreamWriter::SendMessage(const ChromotingHostMessage& message) {
+bool ControlStreamWriter::SendMessage(const ControlMessage& message) {
return buffered_writer_->Write(SerializeAndFrameMessage(message));
}
diff --git a/remoting/protocol/stream_writer.h b/remoting/protocol/stream_writer.h
index 7c5a3bd..15b9bdd 100644
--- a/remoting/protocol/stream_writer.h
+++ b/remoting/protocol/stream_writer.h
@@ -50,7 +50,7 @@ class ControlStreamWriter : public StreamWriterBase {
public:
// Sends the |message| or returns false if called before Init().
// Can be called on any thread.
- bool SendMessage(const ChromotingHostMessage& message);
+ bool SendMessage(const ControlMessage& message);
};
} // namespace protocol
diff --git a/remoting/protocol/video_reader.cc b/remoting/protocol/video_reader.cc
index 832eb04..2c1585a 100644
--- a/remoting/protocol/video_reader.cc
+++ b/remoting/protocol/video_reader.cc
@@ -19,7 +19,10 @@ VideoReader* VideoReader::Create(const SessionConfig* config) {
if (video_config.transport == ChannelConfig::TRANSPORT_SRTP) {
return new RtpVideoReader();
} else if (video_config.transport == ChannelConfig::TRANSPORT_STREAM) {
- return new ProtobufVideoReader();
+ if (video_config.codec == ChannelConfig::CODEC_ZIP)
+ return new ProtobufVideoReader(VideoPacketFormat::ENCODING_ZLIB);
+ else if (video_config.codec == ChannelConfig::CODEC_VERBATIM)
+ return new ProtobufVideoReader(VideoPacketFormat::ENCODING_VERBATIM);
}
return NULL;
}
diff --git a/remoting/protocol/video_stub.h b/remoting/protocol/video_stub.h
index 5c59ca0..c1b6b11 100644
--- a/remoting/protocol/video_stub.h
+++ b/remoting/protocol/video_stub.h
@@ -5,11 +5,12 @@
#ifndef REMOTING_PROTOCOL_VIDEO_STUB_H_
#define REMOTING_PROTOCOL_VIDEO_STUB_H_
-#include "remoting/proto/video.pb.h"
-
class Task;
namespace remoting {
+
+class VideoPacket;
+
namespace protocol {
class VideoStub {
diff --git a/remoting/protocol/video_writer.h b/remoting/protocol/video_writer.h
index eae6b6e..eafa2b4 100644
--- a/remoting/protocol/video_writer.h
+++ b/remoting/protocol/video_writer.h
@@ -11,11 +11,10 @@
#define REMOTING_PROTOCOL_VIDEO_WRITER_H_
#include "base/basictypes.h"
-#include "remoting/proto/video.pb.h"
namespace remoting {
-class ChromotocolConnection;
+class VideoPacket;
namespace protocol {