summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorpwestin@google.com <pwestin@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2013-09-10 16:37:22 +0000
committerpwestin@google.com <pwestin@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2013-09-10 16:37:22 +0000
commit4cd4bc0edc31c9fad74a5240a3a4b3ca8a8fc69e (patch)
tree03dd0e114087ca95b621397b7b1e6f5089b65c5b
parented80a09fcf96a63b22eb6dda80f81482715c0a29 (diff)
downloadchromium_src-4cd4bc0edc31c9fad74a5240a3a4b3ca8a8fc69e.zip
chromium_src-4cd4bc0edc31c9fad74a5240a3a4b3ca8a8fc69e.tar.gz
chromium_src-4cd4bc0edc31c9fad74a5240a3a4b3ca8a8fc69e.tar.bz2
Adding cast threads and video sender folder to cast.
BUG= Review URL: https://chromiumcodereview.appspot.com/23622014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@222279 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--media/cast/cast.gyp4
-rw-r--r--media/cast/cast_defines.h71
-rw-r--r--media/cast/cast_sender.gyp4
-rw-r--r--media/cast/cast_sender.h60
-rw-r--r--media/cast/cast_thread.cc62
-rw-r--r--media/cast/cast_thread.h73
-rw-r--r--media/cast/rtcp/rtcp.cc27
-rw-r--r--media/cast/rtcp/rtcp.h30
-rw-r--r--media/cast/rtcp/rtcp_unittest.cc18
-rw-r--r--media/cast/rtp_common/rtp_defines.h22
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc30
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h26
-rw-r--r--media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc14
-rw-r--r--media/cast/rtp_sender/rtp_sender.cc41
-rw-r--r--media/cast/rtp_sender/rtp_sender.h25
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.cc352
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.gypi19
-rw-r--r--media/cast/video_sender/codecs/vp8/vp8_encoder.h87
-rw-r--r--media/cast/video_sender/mock_video_encoder_controller.h31
-rw-r--r--media/cast/video_sender/video_encoder.cc112
-rw-r--r--media/cast/video_sender/video_encoder.h80
-rw-r--r--media/cast/video_sender/video_encoder_unittest.cc282
-rw-r--r--media/cast/video_sender/video_sender.cc344
-rw-r--r--media/cast/video_sender/video_sender.gypi31
-rw-r--r--media/cast/video_sender/video_sender.h144
-rw-r--r--media/cast/video_sender/video_sender_unittest.cc225
26 files changed, 2045 insertions, 169 deletions
diff --git a/media/cast/cast.gyp b/media/cast/cast.gyp
index 33f6afd..ecced89 100644
--- a/media/cast/cast.gyp
+++ b/media/cast/cast.gyp
@@ -16,6 +16,8 @@
'sources': [
'cast_config.h',
'cast_config.cc',
+ 'cast_thread.h',
+ 'cast_thread.cc',
], # source
},
{
@@ -71,6 +73,8 @@
'rtcp/rtcp_receiver_unittest.cc',
'rtcp/rtcp_sender_unittest.cc',
'rtcp/rtcp_unittest.cc',
+ 'video_sender/video_encoder_unittest.cc',
+ 'video_sender/video_sender_unittest.cc',
], # source
},
], # targets
diff --git a/media/cast/cast_defines.h b/media/cast/cast_defines.h
index 7239148..01edca5 100644
--- a/media/cast/cast_defines.h
+++ b/media/cast/cast_defines.h
@@ -35,6 +35,77 @@ enum DefaultSettings {
kDefaultRtpMaxDelayMs = 100,
};
+// TODO(pwestin): Re-factor the functions below into a class with static
+// methods.
+
+// Magic fractional unit. Used to convert time (in microseconds) to/from
+// fractional NTP seconds.
+static const double kMagicFractionalUnit = 4.294967296E3;
+
+// Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
+// 1 January 1900.
+static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
+static const int64 kNtpEpochDeltaMicroseconds =
+ kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
+
+inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id != prev_frame_id) &&
+ static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
+}
+
+inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
+ return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
+}
+
+inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
+ return (packet_id != prev_packet_id) &&
+ static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
+}
+
+inline bool IsNewerSequenceNumber(uint16 sequence_number,
+ uint16 prev_sequence_number) {
+ // Same function as IsNewerPacketId just different data and name.
+ return IsNewerPacketId(sequence_number, prev_sequence_number);
+}
+
+// Create a NTP diff from seconds and fractions of seconds; delay_fraction is
+// fractions of a second where 0x80000000 is half a second.
+inline uint32 ConvertToNtpDiff(uint32 delay_seconds, uint32 delay_fraction) {
+ return ((delay_seconds & 0x0000FFFF) << 16) +
+ ((delay_fraction & 0xFFFF0000) >> 16);
+}
+
+inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
+ uint32 delay_ms = (ntp_delay & 0x0000ffff) * 1000;
+ delay_ms >>= 16;
+ delay_ms += ((ntp_delay & 0xffff0000) >> 16) * 1000;
+ return base::TimeDelta::FromMilliseconds(delay_ms);
+}
+
+inline void ConvertTimeToFractions(int64 time_us,
+ uint32* seconds,
+ uint32* fractions) {
+ *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
+ *fractions = static_cast<uint32>(
+ (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
+}
+
+inline void ConvertTimeToNtp(const base::TimeTicks& time,
+ uint32* ntp_seconds,
+ uint32* ntp_fractions) {
+ int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
+ ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
+}
+
+inline base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
+ uint32 ntp_fractions) {
+ int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
+ base::Time::kMicrosecondsPerSecond;
+ ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
+ return base::TimeTicks::FromInternalValue(ntp_time_us +
+ kNtpEpochDeltaMicroseconds);
+}
+
} // namespace cast
} // namespace media
diff --git a/media/cast/cast_sender.gyp b/media/cast/cast_sender.gyp
index c41bd64..9206dd0 100644
--- a/media/cast/cast_sender.gyp
+++ b/media/cast/cast_sender.gyp
@@ -6,7 +6,7 @@
'includes': [
# 'audio_sender/audio_sender.gypi',
'congestion_control/congestion_control.gypi',
-# 'video_sender/video_sender.gypi',
+ 'video_sender/video_sender.gypi',
],
'targets': [
{
@@ -23,7 +23,7 @@
'pacing/paced_sender.gyp:paced_sender',
'rtcp/rtcp.gyp:cast_rtcp',
'rtp_sender/rtp_sender.gyp:cast_rtp_sender',
-# 'video_sender',
+ 'video_sender',
], # dependencies
},
],
diff --git a/media/cast/cast_sender.h b/media/cast/cast_sender.h
index b5a3bcb..254370b 100644
--- a/media/cast/cast_sender.h
+++ b/media/cast/cast_sender.h
@@ -1,38 +1,70 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// This is the main interface for the cast sender. All configuration is done
+// at creation.
+//
+// The FrameInput and PacketReceiver interfaces should normally be accessed from
+// the IO thread. However they are allowed to be called from any thread.
#ifndef MEDIA_CAST_CAST_SENDER_H_
#define MEDIA_CAST_CAST_SENDER_H_
#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
namespace media {
namespace cast {
+// This Class is thread safe.
class FrameInput {
public:
- virtual void InsertRawVideoFrame(const I420VideoFrame& video_frame,
- base::TimeTicks capture_time) = 0;
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame& video_frame,
- base::TimeTicks capture_time) = 0;
+ // The video_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const base::Closure callback) = 0;
- virtual void InsertRawAudioFrame(const PcmAudioFrame& audio_frame,
- base::TimeTicks recorded_time) = 0;
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ virtual void InsertRawAudioFrame(const PcmAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame& audio_frame,
- base::TimeTicks recorded_time) = 0;
+ // The audio_frame must be valid until the callback is called.
+ // The callback is called from the main cast thread as soon as
+ // the cast sender is done with the frame; it does not mean that the encoded
+ // frame has been sent out.
+ virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time,
+ const base::Closure callback) = 0;
protected:
virtual ~FrameInput() {}
};
+// This Class is thread safe.
class CastSender {
public:
static CastSender* CreateCastSender(
+ scoped_refptr<CastThread> cast_thread,
const AudioSenderConfig& audio_config,
const VideoSenderConfig& video_config,
VideoEncoderController* const video_encoder_controller,
@@ -40,12 +72,14 @@ class CastSender {
virtual ~CastSender() {};
- virtual FrameInput* frame_input() = 0;
+ // All audio and video frames for the session should be inserted to this
+ // object.
+ // Can be called from any thread.
+ virtual scoped_refptr<FrameInput> frame_input() = 0;
- // All RTCP packets for the call should be inserted to this
- // PacketReceiver. The PacketReceiver pointer is valid as long as the
- // CastSender instance exists.
- virtual PacketReceiver* packet_receiver() = 0;
+ // All RTCP packets for the session should be inserted to this object.
+ // Can be called from any thread.
+ virtual scoped_refptr<PacketReceiver> packet_receiver() = 0;
};
} // namespace cast
diff --git a/media/cast/cast_thread.cc b/media/cast/cast_thread.cc
new file mode 100644
index 0000000..62de8f1
--- /dev/null
+++ b/media/cast/cast_thread.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/cast_thread.h"
+
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+CastThread::CastThread(
+ scoped_refptr<MessageLoopProxy> main_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy)
+ : main_thread_proxy_(main_thread_proxy),
+ audio_encode_thread_proxy_(audio_encode_thread_proxy),
+ audio_decode_thread_proxy_(audio_decode_thread_proxy),
+ video_encode_thread_proxy_(video_encode_thread_proxy),
+ video_decode_thread_proxy_(video_decode_thread_proxy) {
+ DCHECK(main_thread_proxy) << "Main thread required";
+}
+
+bool CastThread::PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task) {
+ scoped_refptr<base::MessageLoopProxy> message_proxy =
+ GetMessageLoopProxyForThread(identifier);
+
+ return message_proxy->PostTask(from_here, task);
+}
+
+bool CastThread::PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay) {
+ scoped_refptr<base::MessageLoopProxy> message_proxy =
+ GetMessageLoopProxyForThread(identifier);
+
+ return message_proxy->PostDelayedTask(from_here, task, delay);
+}
+
+scoped_refptr<base::MessageLoopProxy> CastThread::GetMessageLoopProxyForThread(
+ ThreadId identifier) {
+ switch (identifier) {
+ case CastThread::MAIN:
+ return main_thread_proxy_;
+ case CastThread::AUDIO_ENCODER:
+ return audio_encode_thread_proxy_;
+ case CastThread::AUDIO_DECODER:
+ return audio_decode_thread_proxy_;
+ case CastThread::VIDEO_ENCODER:
+ return video_encode_thread_proxy_;
+ case CastThread::VIDEO_DECODER:
+ return video_decode_thread_proxy_;
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/cast_thread.h b/media/cast/cast_thread.h
new file mode 100644
index 0000000..7ce8588
--- /dev/null
+++ b/media/cast/cast_thread.h
@@ -0,0 +1,73 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_CAST_THREAD_H_
+#define MEDIA_CAST_CAST_THREAD_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_proxy.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace cast {
+
+using base::MessageLoopProxy;
+
+class CastThread : public base::RefCountedThreadSafe<CastThread> {
+ public:
+ // An enumeration of the cast threads.
+ enum ThreadId {
+ // The main thread is where the cast system is configured and where timers
+ // and network IO is performed.
+ MAIN,
+ // The audio encoder thread is where all send side audio processing is done,
+ // primarily encoding but also re-sampling.
+ AUDIO_ENCODER,
+ // The audio decoder thread is where all receive side audio processing is
+ // done, primarily decoding but also error concealment and re-sampling.
+ AUDIO_DECODER,
+ // The video encoder thread is where the video encode processing is done.
+ VIDEO_ENCODER,
+ // The video decoder thread is where the video decode processing is done.
+ VIDEO_DECODER,
+ };
+
+ CastThread(scoped_refptr<MessageLoopProxy> main_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy,
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy);
+
+ // These are the same methods in message_loop.h, but are guaranteed to either
+ // get posted to the MessageLoop if it's still alive, or be deleted otherwise.
+ // They return true iff the thread existed and the task was posted. Note that
+ // even if the task is posted, there's no guarantee that it will run, since
+ // the target thread may already have a Quit message in its queue.
+ bool PostTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task);
+
+ bool PostDelayedTask(ThreadId identifier,
+ const tracked_objects::Location& from_here,
+ const base::Closure& task,
+ base::TimeDelta delay);
+
+ private:
+ scoped_refptr<base::MessageLoopProxy> GetMessageLoopProxyForThread(
+ ThreadId identifier);
+
+ scoped_refptr<MessageLoopProxy> main_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> audio_encode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> audio_decode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> video_encode_thread_proxy_;
+ scoped_refptr<MessageLoopProxy> video_decode_thread_proxy_;
+
+ DISALLOW_COPY_AND_ASSIGN(CastThread);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_CAST_THREAD_H_
diff --git a/media/cast/rtcp/rtcp.cc b/media/cast/rtcp/rtcp.cc
index 2ee2631..16956f7 100644
--- a/media/cast/rtcp/rtcp.cc
+++ b/media/cast/rtcp/rtcp.cc
@@ -21,9 +21,6 @@ static const int kMaxRttMs = 1000000; // 1000 seconds.
// Time limit for received RTCP messages when we stop using it for lip-sync.
static const int64 kMaxDiffSinceReceivedRtcpMs = 100000; // 100 seconds.
-// Magic fractional unit.
-static const double kMagicFractionalUnit = 4.294967296E3;
-
class LocalRtcpRttFeedback : public RtcpRttFeedback {
public:
explicit LocalRtcpRttFeedback(Rtcp* rtcp)
@@ -378,30 +375,6 @@ bool Rtcp::Rtt(base::TimeDelta* rtt,
return true;
}
-void Rtcp::ConvertTimeToFractions(int64 time_us,
- uint32* seconds,
- uint32* fractions) const {
- *seconds = static_cast<uint32>(time_us / base::Time::kMicrosecondsPerSecond);
- *fractions = static_cast<uint32>(
- (time_us % base::Time::kMicrosecondsPerSecond) * kMagicFractionalUnit);
-}
-
-void Rtcp::ConvertTimeToNtp(const base::TimeTicks& time,
- uint32* ntp_seconds,
- uint32* ntp_fractions) const {
- int64 time_us = time.ToInternalValue() - kNtpEpochDeltaMicroseconds;
- ConvertTimeToFractions(time_us, ntp_seconds, ntp_fractions);
-}
-
-base::TimeTicks Rtcp::ConvertNtpToTime(uint32 ntp_seconds,
- uint32 ntp_fractions) const {
- int64 ntp_time_us = static_cast<int64>(ntp_seconds) *
- base::Time::kMicrosecondsPerSecond;
- ntp_time_us += static_cast<int64>(ntp_fractions) / kMagicFractionalUnit;
- return base::TimeTicks::FromInternalValue(ntp_time_us +
- kNtpEpochDeltaMicroseconds);
-}
-
int Rtcp::CheckForWrapAround(uint32 new_timestamp,
uint32 old_timestamp) const {
if (new_timestamp < old_timestamp) {
diff --git a/media/cast/rtcp/rtcp.h b/media/cast/rtcp/rtcp.h
index 9cf9708..5b06b82 100644
--- a/media/cast/rtcp/rtcp.h
+++ b/media/cast/rtcp/rtcp.h
@@ -66,12 +66,6 @@ class RtpReceiverStatistics {
class Rtcp {
public:
- // Network Time Protocol (NTP), which is in seconds relative to 0h UTC on
- // 1 January 1900.
- static const int64 kNtpEpochDeltaSeconds = GG_INT64_C(9435484800);
- static const int64 kNtpEpochDeltaMicroseconds =
- kNtpEpochDeltaSeconds * base::Time::kMicrosecondsPerSecond;
-
Rtcp(RtcpSenderFeedback* sender_feedback,
PacedPacketSender* paced_packet_sender,
RtpSenderStatistics* rtp_sender_statistics,
@@ -104,19 +98,13 @@ class Rtcp {
}
protected:
- void ConvertTimeToNtp(const base::TimeTicks& time,
- uint32* ntp_seconds,
- uint32* ntp_fractions) const;
-
- base::TimeTicks ConvertNtpToTime(uint32 ntp_seconds,
- uint32 ntp_fractions) const;
-
int CheckForWrapAround(uint32 new_timestamp,
uint32 old_timestamp) const;
void OnReceivedLipSyncInfo(uint32 rtp_timestamp,
uint32 ntp_seconds,
uint32 ntp_fraction);
+
private:
friend class LocalRtcpRttFeedback;
friend class LocalRtcpReceiverFeedback;
@@ -137,24 +125,8 @@ class Rtcp {
void UpdateRtt(const base::TimeDelta& sender_delay,
const base::TimeDelta& receiver_delay);
- void ConvertTimeToFractions(int64 time_us,
- uint32* seconds,
- uint32* fractions) const;
-
void UpdateNextTimeToSendRtcp();
- inline uint32 ConvertToNtpDiff(uint32 delay_seconds, uint32 delay_fraction) {
- return ((delay_seconds & 0x0000FFFF) << 16) +
- ((delay_fraction & 0xFFFF0000) >> 16);
- }
-
- inline base::TimeDelta ConvertFromNtpDiff(uint32 ntp_delay) {
- uint32 delay_ms = (ntp_delay & 0x0000ffff) * 1000;
- delay_ms /= 65536;
- delay_ms += ((ntp_delay & 0xffff0000) >> 16) * 1000;
- return base::TimeDelta::FromMilliseconds(delay_ms);
- }
-
const base::TimeDelta rtcp_interval_;
const RtcpMode rtcp_mode_;
const bool sending_media_;
diff --git a/media/cast/rtcp/rtcp_unittest.cc b/media/cast/rtcp/rtcp_unittest.cc
index 049fbeb..dfcc6ea 100644
--- a/media/cast/rtcp/rtcp_unittest.cc
+++ b/media/cast/rtcp/rtcp_unittest.cc
@@ -84,8 +84,6 @@ class RtcpPeer : public Rtcp {
c_name) {
}
- using Rtcp::ConvertTimeToNtp;
- using Rtcp::ConvertNtpToTime;
using Rtcp::CheckForWrapAround;
using Rtcp::OnReceivedLipSyncInfo;
};
@@ -308,15 +306,14 @@ TEST_F(RtcpTest, NtpAndTime) {
kReceiverSsrc,
kCName);
rtcp_peer.set_clock(&testing_clock_);
- int64 input_time_us = 12345678901000LL + Rtcp::kNtpEpochDeltaMicroseconds;
uint32 ntp_seconds = 0;
uint32 ntp_fractions = 0;
- base::TimeTicks input_time =
- base::TimeTicks::FromInternalValue(input_time_us);
- rtcp_peer.ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
EXPECT_EQ(12345678u, ntp_seconds);
EXPECT_EQ(input_time,
- rtcp_peer.ConvertNtpToTime(ntp_seconds, ntp_fractions));
+ ConvertNtpToTime(ntp_seconds, ntp_fractions));
}
TEST_F(RtcpTest, WrapAround) {
@@ -366,14 +363,13 @@ TEST_F(RtcpTest, RtpTimestampInSenderTime) {
EXPECT_FALSE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
&rtp_timestamp_in_ticks));
- int64 input_time_us = 12345678901000LL + Rtcp::kNtpEpochDeltaMicroseconds;
uint32 ntp_seconds = 0;
uint32 ntp_fractions = 0;
- base::TimeTicks input_time =
- base::TimeTicks::FromInternalValue(input_time_us);
+ base::TimeTicks input_time = base::TimeTicks::FromInternalValue(
+ 12345678901000LL + kNtpEpochDeltaMicroseconds);
// Test exact match.
- rtcp_peer.ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
+ ConvertTimeToNtp(input_time, &ntp_seconds, &ntp_fractions);
rtcp_peer.OnReceivedLipSyncInfo(rtp_timestamp, ntp_seconds, ntp_fractions);
EXPECT_TRUE(rtcp_peer.RtpTimestampInSenderTime(frequency, rtp_timestamp,
&rtp_timestamp_in_ticks));
diff --git a/media/cast/rtp_common/rtp_defines.h b/media/cast/rtp_common/rtp_defines.h
index 89ee019..2268fa9 100644
--- a/media/cast/rtp_common/rtp_defines.h
+++ b/media/cast/rtp_common/rtp_defines.h
@@ -5,8 +5,6 @@
#ifndef MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
#define MEDIA_CAST_RTP_COMMON_RTP_DEFINES_H_
-#include <cstring>
-
#include "base/basictypes.h"
#include "media/cast/cast_config.h"
#include "media/cast/rtcp/rtcp_defines.h"
@@ -45,26 +43,6 @@ class RtpPayloadFeedback {
virtual ~RtpPayloadFeedback() {}
};
-inline bool IsNewerFrameId(uint8 frame_id, uint8 prev_frame_id) {
- return (frame_id != prev_frame_id) &&
- static_cast<uint8>(frame_id - prev_frame_id) < 0x80;
-}
-
-inline bool IsOlderFrameId(uint8 frame_id, uint8 prev_frame_id) {
- return (frame_id == prev_frame_id) || IsNewerFrameId(prev_frame_id, frame_id);
-}
-
-inline bool IsNewerPacketId(uint16 packet_id, uint16 prev_packet_id) {
- return (packet_id != prev_packet_id) &&
- static_cast<uint16>(packet_id - prev_packet_id) < 0x8000;
-}
-
-inline bool IsNewerSequenceNumber(uint16 sequence_number,
- uint16 prev_sequence_number) {
- // Same function as IsNewerPacketId just different data and name.
- return IsNewerPacketId(sequence_number, prev_sequence_number);
-}
-
} // namespace cast
} // namespace media
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index 264701c..6900bc2 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -23,7 +23,6 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
: config_(rtp_packetizer_config),
transport_(transport),
packet_storage_(packet_storage),
- time_last_sent_rtp_timestamp_(0),
sequence_number_(config_.sequence_number),
rtp_timestamp_(config_.rtp_timestamp),
frame_id_(0),
@@ -36,30 +35,33 @@ RtpPacketizer::RtpPacketizer(PacedPacketSender* transport,
RtpPacketizer::~RtpPacketizer() {}
void RtpPacketizer::IncomingEncodedVideoFrame(
- const EncodedVideoFrame& video_frame,
- int64 capture_time_ms) {
+ const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
DCHECK(!config_.audio) << "Invalid state";
if (config_.audio) return;
+ base::TimeTicks zero_time;
+ base::TimeDelta capture_delta = capture_time - zero_time;
+
// Timestamp is in 90 KHz for video.
- rtp_timestamp_ = static_cast<uint32>(capture_time_ms * 90);
- time_last_sent_rtp_timestamp_ = capture_time_ms;
+ rtp_timestamp_ = static_cast<uint32>(capture_delta.InMilliseconds() * 90);
+ time_last_sent_rtp_timestamp_ = capture_time;
- Cast(video_frame.key_frame,
- video_frame.last_referenced_frame_id,
+ Cast(video_frame->key_frame,
+ video_frame->last_referenced_frame_id,
rtp_timestamp_,
- video_frame.data);
+ video_frame->data);
}
void RtpPacketizer::IncomingEncodedAudioFrame(
- const EncodedAudioFrame& audio_frame,
- int64 recorded_time) {
+ const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
DCHECK(config_.audio) << "Invalid state";
if (!config_.audio) return;
- rtp_timestamp_ += audio_frame.samples; // Timestamp is in samples for audio.
+ rtp_timestamp_ += audio_frame->samples; // Timestamp is in samples for audio.
time_last_sent_rtp_timestamp_ = recorded_time;
- Cast(true, 0, rtp_timestamp_, audio_frame.data);
+ Cast(true, 0, rtp_timestamp_, audio_frame->data);
}
uint16 RtpPacketizer::NextSequenceNumber() {
@@ -67,9 +69,9 @@ uint16 RtpPacketizer::NextSequenceNumber() {
return sequence_number_ - 1;
}
-bool RtpPacketizer::LastSentTimestamp(int64* time_sent,
+bool RtpPacketizer::LastSentTimestamp(base::TimeTicks* time_sent,
uint32* rtp_timestamp) const {
- if (time_last_sent_rtp_timestamp_ == 0) return false;
+ if (time_last_sent_rtp_timestamp_.is_null()) return false;
*time_sent = time_last_sent_rtp_timestamp_;
*rtp_timestamp = rtp_timestamp_;
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
index f1941cd0..3ada98b 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -9,6 +9,7 @@
#include <list>
#include <map>
+#include "base/time/time.h"
#include "media/cast/rtp_common/rtp_defines.h"
#include "media/cast/rtp_sender/packet_storage/packet_storage.h"
#include "media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_config.h"
@@ -18,6 +19,7 @@ namespace cast {
class PacedPacketSender;
+// This object is only called from the main cast thread.
class RtpPacketizer {
public:
RtpPacketizer(PacedPacketSender* transport,
@@ -25,38 +27,42 @@ class RtpPacketizer {
RtpPacketizerConfig rtp_packetizer_config);
~RtpPacketizer();
- void IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time);
+ // The video_frame object's ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
- void IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time);
+ // The audio_frame object's ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
- bool LastSentTimestamp(int64* time_sent, uint32* rtp_timestamp) const;
+ bool LastSentTimestamp(base::TimeTicks* time_sent,
+ uint32* rtp_timestamp) const;
// Return the next sequence number, and increment by one. Enables unique
// incremental sequence numbers for every packet (including retransmissions).
uint16 NextSequenceNumber();
- uint32 send_packets_count() {return send_packets_count_;}
- uint32 send_octet_count() {return send_octet_count_;}
+ int send_packets_count() { return send_packets_count_; }
+ int send_octet_count() { return send_octet_count_; }
private:
void Cast(bool is_key, uint8 reference_frame_id,
uint32 timestamp, std::vector<uint8> data);
void BuildCommonRTPheader(std::vector<uint8>* packet, bool marker_bit,
uint32 time_stamp);
+
RtpPacketizerConfig config_;
PacedPacketSender* transport_;
PacketStorage* packet_storage_;
- int64 time_last_sent_rtp_timestamp_;
+ base::TimeTicks time_last_sent_rtp_timestamp_;
uint16 sequence_number_;
uint32 rtp_timestamp_;
uint8 frame_id_;
uint16 packet_id_;
- uint32 send_packets_count_;
- uint32 send_octet_count_;
+ int send_packets_count_;
+ int send_octet_count_;
};
} // namespace cast
diff --git a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index 7d99a46..bed7cba 100644
--- a/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -17,7 +17,7 @@ namespace media {
namespace cast {
static const int kPayload = 127;
-static const uint32 kTimestamp = 10;
+static const uint32 kTimestampMs = 10;
static const uint16 kSeqNum = 33;
static const int kTimeOffset = 22222;
static const int kMaxPacketLength = 1500;
@@ -46,7 +46,7 @@ class TestRtpPacketTransport : public PacedPacketSender {
rtp_header.webrtc.header.markerBit);
EXPECT_EQ(kPayload, rtp_header.webrtc.header.payloadType);
EXPECT_EQ(sequence_number_, rtp_header.webrtc.header.sequenceNumber);
- EXPECT_EQ(kTimestamp * 90, rtp_header.webrtc.header.timestamp);
+ EXPECT_EQ(kTimestampMs * 90, rtp_header.webrtc.header.timestamp);
EXPECT_EQ(config_.ssrc, rtp_header.webrtc.header.ssrc);
EXPECT_EQ(0, rtp_header.webrtc.header.numCSRCs);
}
@@ -121,7 +121,10 @@ class RtpPacketizerTest : public ::testing::Test {
TEST_F(RtpPacketizerTest, SendStandardPackets) {
int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
- rtp_packetizer_->IncomingEncodedVideoFrame(video_frame_, kTimestamp);
+
+ base::TimeTicks time;
+ time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,time);
}
TEST_F(RtpPacketizerTest, Stats) {
@@ -130,7 +133,10 @@ TEST_F(RtpPacketizerTest, Stats) {
// Insert packets at varying lengths.
unsigned int expected_num_of_packets = kFrameSize / kMaxPacketLength + 1;
transport_->SetExpectedNumberOfPackets(expected_num_of_packets);
- rtp_packetizer_->IncomingEncodedVideoFrame(video_frame_, kTimestamp);
+
+ base::TimeTicks time;
+ time += base::TimeDelta::FromMilliseconds(kTimestampMs);
+ rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packets_count());
EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
}
diff --git a/media/cast/rtp_sender/rtp_sender.cc b/media/cast/rtp_sender/rtp_sender.cc
index c735dd0e..ecaae40 100644
--- a/media/cast/rtp_sender/rtp_sender.cc
+++ b/media/cast/rtp_sender/rtp_sender.cc
@@ -12,25 +12,6 @@
namespace media {
namespace cast {
-namespace {
-
-// January 1970, in milliseconds.
-static const int64 kNtpJan1970 = 2208988800000LL;
-
-// Magic fractional unit.
-static const uint32 kMagicFractionalUnit = 4294967;
-
-void ConvertTimeToFractions(int64 time_ms, uint32* seconds,
- uint32* fractions) {
- *seconds = static_cast<uint32>(time_ms / 1000);
- *fractions = static_cast<uint32>((time_ms % 1000) * kMagicFractionalUnit);
-}
-
-void ConvertTimeToNtp(int64 time_ms, uint32* ntp_seconds,
- uint32* ntp_fractions) {
- ConvertTimeToFractions(time_ms + kNtpJan1970, ntp_seconds, ntp_fractions);
-}
-} // namespace
RtpSender::RtpSender(const AudioSenderConfig* audio_config,
const VideoSenderConfig* video_config,
@@ -65,13 +46,13 @@ RtpSender::RtpSender(const AudioSenderConfig* audio_config,
RtpSender::~RtpSender() {}
-void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time) {
+void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time) {
packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
}
-void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time) {
+void RtpSender::IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time) {
packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
}
@@ -107,7 +88,6 @@ void RtpSender::ResendPackets(
}
} while (success);
-
} else {
for (std::set<uint16>::const_iterator set_it = packets.begin();
set_it != packets.end(); ++set_it) {
@@ -138,23 +118,24 @@ void RtpSender::UpdateSequenceNumber(std::vector<uint8>* packet) {
(*packet)[index + 1] =(static_cast<uint8>(new_sequence_number >> 8));
}
-void RtpSender::RtpStatistics(int64 now_ms, RtcpSenderInfo* sender_info) {
+void RtpSender::RtpStatistics(const base::TimeTicks& now,
+ RtcpSenderInfo* sender_info) {
// The timestamp of this Rtcp packet should be estimated as the timestamp of
// the frame being captured at this moment. We are calculating that
// timestamp as the last frame's timestamp + the time since the last frame
// was captured.
uint32 ntp_seconds = 0;
uint32 ntp_fraction = 0;
- ConvertTimeToNtp(now_ms, &ntp_seconds, &ntp_fraction);
+ ConvertTimeToNtp(now, &ntp_seconds, &ntp_fraction);
// sender_info->ntp_seconds = ntp_seconds;
sender_info->ntp_fraction = ntp_fraction;
- int64 time_sent_ms;
+ base::TimeTicks time_sent;
uint32 rtp_timestamp;
- if (packetizer_->LastSentTimestamp(&time_sent_ms, &rtp_timestamp)) {
- int64 time_since_last_send_ms = now_ms - time_sent_ms;
+ if (packetizer_->LastSentTimestamp(&time_sent, &rtp_timestamp)) {
+ base::TimeDelta time_since_last_send = now - time_sent;
sender_info->rtp_timestamp = rtp_timestamp +
- time_since_last_send_ms * (config_.frequency / 1000);
+ time_since_last_send.InMilliseconds() * (config_.frequency / 1000);
} else {
sender_info->rtp_timestamp = 0;
}
diff --git a/media/cast/rtp_sender/rtp_sender.h b/media/cast/rtp_sender/rtp_sender.h
index 9352fd3..f6e59ac 100644
--- a/media/cast/rtp_sender/rtp_sender.h
+++ b/media/cast/rtp_sender/rtp_sender.h
@@ -27,6 +27,10 @@ struct RtcpSenderInfo;
typedef std::map<uint8, std::set<uint16> > MissingFramesAndPackets;
+// This object is only called from the main cast thread.
+// This class handles splitting encoded audio and video frames into packets and
+// adding an RTP header to each packet. The sent packets are stored until they
+// are acknowledged by the remote peer or timed out.
class RtpSender {
public:
RtpSender(const AudioSenderConfig* audio_config,
@@ -35,16 +39,23 @@ class RtpSender {
~RtpSender();
- void IncomingEncodedVideoFrame(const EncodedVideoFrame& video_frame,
- int64 capture_time);
+ // The video_frame object's ownership is handled by the main cast thread.
+ void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+ const base::TimeTicks& capture_time);
- void IncomingEncodedAudioFrame(const EncodedAudioFrame& audio_frame,
- int64 recorded_time);
+ // The audio_frame object's ownership is handled by the main cast thread.
+ void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
+ const base::TimeTicks& recorded_time);
- void ResendPackets(
- const MissingFramesAndPackets& missing_frames_and_packets);
+ void ResendPackets(const MissingFramesAndPackets& missing_packets);
- void RtpStatistics(int64 now_ms, RtcpSenderInfo* sender_info);
+ void RtpStatistics(const base::TimeTicks& now, RtcpSenderInfo* sender_info);
+
+ // Used for testing.
+ void set_clock(base::TickClock* clock) {
+ // TODO(pwestin): review how we pass in a clock for testing.
+ clock_ = clock;
+ }
private:
void UpdateSequenceNumber(std::vector<uint8>* packet);
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
new file mode 100644
index 0000000..d24ef4b
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -0,0 +1,352 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// TODO(pwestin): add a link to the design document describing the generic
+// protocol and the VP8-specific details.
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/rtp_common/rtp_defines.h"
+#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
+
+namespace media {
+namespace cast {
+
+static const uint32 kMinIntra = 300;
+
+Vp8Encoder::Vp8Encoder(const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames)
+ : cast_config_(video_config),
+ use_multiple_video_buffers_(
+ cast_config_.max_number_of_video_buffers_used ==
+ kNumberOfVp8VideoBuffers),
+ max_number_of_repeated_buffers_in_a_row_(
+ (max_unacked_frames > kNumberOfVp8VideoBuffers) ?
+ ((max_unacked_frames - 1) / kNumberOfVp8VideoBuffers) : 0),
+ config_(new vpx_codec_enc_cfg_t()),
+ encoder_(new vpx_codec_ctx_t()),
+ // Creating a wrapper to the image - setting image data to NULL. Actual
+ // pointer will be set during encode. Setting align to 1, as it is
+ // meaningless (actual memory is not allocated).
+ raw_image_(vpx_img_wrap(NULL, IMG_FMT_I420, video_config.width,
+ video_config.height, 1, NULL)),
+ key_frame_requested_(true),
+ timestamp_(0),
+ last_encoded_frame_id_(kStartFrameId),
+ number_of_repeated_buffers_(0) {
+ // VP8 has 3 buffers available for prediction; with
+ // max_number_of_video_buffers_used set to 1 we maximize the coding
+ // efficiency, however in this mode we cannot skip frames in the receiver
+ // to catch up after a temporary network outage; with
+ // max_number_of_video_buffers_used set to 3 we allow 2 frames to be
+ // skipped by the receiver without error propagation.
+ DCHECK(cast_config_.max_number_of_video_buffers_used == 1 ||
+ cast_config_.max_number_of_video_buffers_used ==
+ kNumberOfVp8VideoBuffers) << "Invalid argument";
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ acked_frame_buffers_[i] = true;
+ used_buffers_frame_id_[i] = kStartFrameId;
+ }
+ InitEncode(video_config.number_of_cores);
+}
+
+Vp8Encoder::~Vp8Encoder() {
+ vpx_codec_destroy(encoder_);
+ vpx_img_free(raw_image_);
+}
+
+void Vp8Encoder::InitEncode(int number_of_cores) {
+ // Populate encoder configuration with default values.
+ if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_.get(), 0)) {
+ DCHECK(false) << "Invalid return value";
+ }
+ config_->g_w = cast_config_.width;
+ config_->g_h = cast_config_.height;
+ config_->rc_target_bitrate = cast_config_.start_bitrate / 1000; // In kbit/s.
+
+ // Setting the codec time base.
+ config_->g_timebase.num = 1;
+ config_->g_timebase.den = kVideoFrequency;
+ config_->g_lag_in_frames = 0;
+ config_->kf_mode = VPX_KF_DISABLED;
+ if (use_multiple_video_buffers_) {
+ // We must enable error resilience when we use multiple buffers, due to
+ // codec requirements.
+ config_->g_error_resilient = 1;
+ }
+
+ if (cast_config_.width * cast_config_.height > 640 * 480
+ && number_of_cores >= 2) {
+ config_->g_threads = 2; // 2 threads for qHD/HD.
+ } else {
+ config_->g_threads = 1; // 1 thread for VGA or less.
+ }
+
+ // Rate control settings.
+ // TODO(pwestin): revisit these constants. Currently identical to webrtc.
+ config_->rc_dropframe_thresh = 30;
+ config_->rc_end_usage = VPX_CBR;
+ config_->g_pass = VPX_RC_ONE_PASS;
+ config_->rc_resize_allowed = 0;
+ config_->rc_min_quantizer = cast_config_.min_qp;
+ config_->rc_max_quantizer = cast_config_.max_qp;
+ config_->rc_undershoot_pct = 100;
+ config_->rc_overshoot_pct = 15;
+ config_->rc_buf_initial_sz = 500;
+ config_->rc_buf_optimal_sz = 600;
+ config_->rc_buf_sz = 1000;
+
+ // Set the maximum target size of any key frame.
+ uint32 rc_max_intra_target = MaxIntraTarget(config_->rc_buf_optimal_sz);
+ vpx_codec_flags_t flags = 0;
+ // TODO(mikhal): Tune settings.
+ if (vpx_codec_enc_init(encoder_, vpx_codec_vp8_cx(), config_.get(), flags)) {
+ DCHECK(false) << "Invalid return value";
+ }
+ vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
+ vpx_codec_control(encoder_, VP8E_SET_NOISE_SENSITIVITY, 0);
+ vpx_codec_control(encoder_, VP8E_SET_CPUUSED, -6);
+ vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
+ rc_max_intra_target);
+}
+
+bool Vp8Encoder::Encode(const I420VideoFrame& input_image,
+ EncodedVideoFrame* encoded_image) {
+ // Image in vpx_image_t format.
+ // Input image is const. VP8's raw image is not defined as const.
+ raw_image_->planes[PLANE_Y] = const_cast<uint8*>(input_image.y_plane.data);
+ raw_image_->planes[PLANE_U] = const_cast<uint8*>(input_image.u_plane.data);
+ raw_image_->planes[PLANE_V] = const_cast<uint8*>(input_image.v_plane.data);
+
+ raw_image_->stride[VPX_PLANE_Y] = input_image.y_plane.stride;
+ raw_image_->stride[VPX_PLANE_U] = input_image.u_plane.stride;
+ raw_image_->stride[VPX_PLANE_V] = input_image.v_plane.stride;
+
+ uint8 latest_frame_id_to_reference;
+ Vp8Buffers buffer_to_update;
+ vpx_codec_flags_t flags = 0;
+ if (key_frame_requested_) {
+ flags = VPX_EFLAG_FORCE_KF;
+ // Self reference.
+ latest_frame_id_to_reference =
+ static_cast<uint8>(last_encoded_frame_id_ + 1);
+ // We can pick any buffer as buffer_to_update since we update
+ // them all.
+ buffer_to_update = kLastBuffer;
+ } else {
+ // Reference all acked frames (buffers).
+ latest_frame_id_to_reference = GetLatestFrameIdToReference();
+ GetCodecReferenceFlags(&flags);
+ buffer_to_update = GetNextBufferToUpdate();
+ GetCodecUpdateFlags(buffer_to_update, &flags);
+ }
+
+ // Note: The duration does not reflect the real time between frames. This is
+ // done to keep the encoder happy.
+ uint32 duration = kVideoFrequency / cast_config_.max_frame_rate;
+ if (vpx_codec_encode(encoder_, raw_image_, timestamp_, duration, flags,
+ VPX_DL_REALTIME)) {
+ return false;
+ }
+ timestamp_ += duration;
+
+ // Get encoded frame.
+ const vpx_codec_cx_pkt_t *pkt = NULL;
+ vpx_codec_iter_t iter = NULL;
+ int total_size = 0;
+ while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
+ if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
+ total_size += pkt->data.frame.sz;
+ encoded_image->data.reserve(total_size);
+ encoded_image->data.insert(
+ encoded_image->data.end(),
+ static_cast<const uint8*>(pkt->data.frame.buf),
+ static_cast<const uint8*>(pkt->data.frame.buf) +
+ pkt->data.frame.sz);
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ encoded_image->key_frame = true;
+ } else {
+ encoded_image->key_frame = false;
+ }
+ }
+ }
+ // Don't update frame_id for zero size frames.
+ if (total_size == 0) return true;
+
+ // Populate the encoded frame.
+ encoded_image->codec = kVp8;
+ encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
+ encoded_image->frame_id = ++last_encoded_frame_id_;
+
+ if (encoded_image->key_frame) {
+ key_frame_requested_ = false;
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ used_buffers_frame_id_[i] = encoded_image->frame_id;
+ }
+ // We can pick any buffer as last_used_vp8_buffer_ since we update
+ // them all.
+ last_used_vp8_buffer_ = buffer_to_update;
+ } else {
+ if (buffer_to_update != kNoBuffer) {
+ acked_frame_buffers_[buffer_to_update] = false;
+ used_buffers_frame_id_[buffer_to_update] = encoded_image->frame_id;
+ last_used_vp8_buffer_ = buffer_to_update;
+ }
+ }
+ return true;
+}
+
+void Vp8Encoder::GetCodecReferenceFlags(vpx_codec_flags_t* flags) {
+ if (!use_multiple_video_buffers_) return;
+
+ // We need to reference something.
+ DCHECK(acked_frame_buffers_[kAltRefBuffer] ||
+ acked_frame_buffers_[kGoldenBuffer] ||
+ acked_frame_buffers_[kLastBuffer]) << "Invalid state";
+
+ if (!acked_frame_buffers_[kAltRefBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_ARF;
+ }
+ if (!acked_frame_buffers_[kGoldenBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_GF;
+ }
+ if (!acked_frame_buffers_[kLastBuffer]) {
+ *flags |= VP8_EFLAG_NO_REF_LAST;
+ }
+}
+
+uint8 Vp8Encoder::GetLatestFrameIdToReference() {
+ if (!use_multiple_video_buffers_) return last_encoded_frame_id_;
+
+ int latest_frame_id_to_reference = -1;
+ if (acked_frame_buffers_[kAltRefBuffer]) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kAltRefBuffer];
+ }
+ if (acked_frame_buffers_[kGoldenBuffer]) {
+ if (latest_frame_id_to_reference == -1) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+ } else {
+ if (IsNewerFrameId(used_buffers_frame_id_[kGoldenBuffer],
+ latest_frame_id_to_reference)) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kGoldenBuffer];
+ }
+ }
+ }
+ if (acked_frame_buffers_[kLastBuffer]) {
+ if (latest_frame_id_to_reference == -1) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+ } else {
+ if (IsNewerFrameId(used_buffers_frame_id_[kLastBuffer],
+ latest_frame_id_to_reference)) {
+ latest_frame_id_to_reference = used_buffers_frame_id_[kLastBuffer];
+ }
+ }
+ }
+ DCHECK(latest_frame_id_to_reference != -1) << "Invalid state";
+ return static_cast<uint8>(latest_frame_id_to_reference);
+}
+
+Vp8Encoder::Vp8Buffers Vp8Encoder::GetNextBufferToUpdate() {
+ // Update at most one buffer, except for key-frames.
+
+ Vp8Buffers buffer_to_update;
+ if (number_of_repeated_buffers_ < max_number_of_repeated_buffers_in_a_row_) {
+ // TODO(pwestin): experiment with this. The issue with only this change is
+ // that we can end up with only 4 frames in flight when we expect 6.
+ // buffer_to_update = last_used_vp8_buffer_;
+ buffer_to_update = kNoBuffer;
+ ++number_of_repeated_buffers_;
+ } else {
+ number_of_repeated_buffers_ = 0;
+ switch (last_used_vp8_buffer_) {
+ case kAltRefBuffer:
+ buffer_to_update = kLastBuffer;
+ break;
+ case kLastBuffer:
+ buffer_to_update = kGoldenBuffer;
+ break;
+ case kGoldenBuffer:
+ buffer_to_update = kAltRefBuffer;
+ break;
+ case kNoBuffer:
+ DCHECK(false) << "Invalid state";
+ break;
+ }
+ }
+ return buffer_to_update;
+}
+
+void Vp8Encoder::GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+ vpx_codec_flags_t* flags) {
+ if (!use_multiple_video_buffers_) return;
+
+ // Update at most one buffer, except for key-frames.
+ switch (buffer_to_update) {
+ case kAltRefBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kLastBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ break;
+ case kGoldenBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kNoBuffer:
+ *flags |= VP8_EFLAG_NO_UPD_ARF;
+ *flags |= VP8_EFLAG_NO_UPD_GF;
+ *flags |= VP8_EFLAG_NO_UPD_LAST;
+ *flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ }
+}
+
+void Vp8Encoder::UpdateRates(uint32 new_bitrate) {
+ config_->rc_target_bitrate = new_bitrate / 1000; // In kbit/s.
+ // Update encoder context.
+ if (vpx_codec_enc_config_set(encoder_, config_.get())) {
+ DCHECK(false) << "Invalid return value";
+ }
+}
+
+void Vp8Encoder::LatestFrameIdToReference(uint8 frame_id) {
+ if (!use_multiple_video_buffers_) return;
+
+ for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
+ if (frame_id == used_buffers_frame_id_[i]) {
+ acked_frame_buffers_[i] = true;
+ }
+ }
+}
+
+void Vp8Encoder::RequestKeyFrame() {
+ key_frame_requested_ = true;
+}
+
+// Calculate the max size of the key frame relative to a normal delta frame.
+uint32 Vp8Encoder::MaxIntraTarget(uint32 optimal_buffer_size_ms) const {
+ // Set max to the optimal buffer level (normalized by target BR),
+ // and scaled by a scale_parameter.
+ // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
+ // This value is expressed as a percentage of perFrameBw:
+ // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
+ // The target in % is as follows:
+
+ float scale_parameter = 0.5;
+ uint32 target_pct = optimal_buffer_size_ms * scale_parameter *
+ cast_config_.max_frame_rate / 10;
+
+ // Don't go below 3 times the per frame bandwidth.
+ return std::max(target_pct, kMinIntra);
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
new file mode 100644
index 0000000..0b12789
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.gypi
@@ -0,0 +1,19 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'cast_vp8_encoder',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '<(DEPTH)/',
+ '<(DEPTH)/third_party/',
+ ],
+ 'sources': [
+ 'vp8_encoder.cc',
+ 'vp8_encoder.h',
+ ], # source
+ 'dependencies': [
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ ],
+ },
+ ],
+}
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
new file mode 100644
index 0000000..354d529
--- /dev/null
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -0,0 +1,87 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/cast/cast_config.h"
+#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+
+// VPX forward declaration.
+typedef struct vpx_codec_ctx vpx_enc_ctx_t;
+
+namespace media {
+namespace cast {
+
+const int kNumberOfVp8VideoBuffers = 3;
+
+class Vp8Encoder {
+ public:
+ Vp8Encoder(const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames);
+
+ ~Vp8Encoder();
+
+ // Encode a raw image (as a part of a video stream).
+ bool Encode(const I420VideoFrame& input_image,
+ EncodedVideoFrame* encoded_image);
+
+ // Update the encoder with a new target bit rate.
+ void UpdateRates(uint32 new_bitrate);
+
+ // Set the next frame to be a key frame.
+ void RequestKeyFrame();
+
+ void LatestFrameIdToReference(uint8 frame_id);
+
+ private:
+ enum Vp8Buffers {
+ kAltRefBuffer = 0,
+ kGoldenBuffer = 1,
+ kLastBuffer = 2,
+ kNoBuffer = 3 // Note: must be last.
+ };
+
+ void InitEncode(int number_of_cores);
+
+ // Calculate the max target in % for a keyframe.
+ uint32 MaxIntraTarget(uint32 optimal_buffer_size) const;
+
+ // Calculate which next Vp8 buffers to update with the next frame.
+ Vp8Buffers GetNextBufferToUpdate();
+
+ // Calculate which previous frame to reference.
+ uint8_t GetLatestFrameIdToReference();
+
+ // Get encoder flags for our referenced encoder buffers.
+ void GetCodecReferenceFlags(vpx_codec_flags_t* flags);
+
+ // Get encoder flags for our encoder buffers to update with next frame.
+ void GetCodecUpdateFlags(Vp8Buffers buffer_to_update,
+ vpx_codec_flags_t* flags);
+
+ const VideoSenderConfig cast_config_;
+ const bool use_multiple_video_buffers_;
+ const int max_number_of_repeated_buffers_in_a_row_;
+
+ // VP8 internal objects.
+ scoped_ptr<vpx_codec_enc_cfg_t> config_;
+ vpx_enc_ctx_t* encoder_;
+ vpx_image_t* raw_image_;
+
+ bool key_frame_requested_;
+ int64 timestamp_;
+ uint8 last_encoded_frame_id_;
+ uint8 used_buffers_frame_id_[kNumberOfVp8VideoBuffers];
+ bool acked_frame_buffers_[kNumberOfVp8VideoBuffers];
+ Vp8Buffers last_used_vp8_buffer_;
+ int number_of_repeated_buffers_;
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_CODECS_VP8_VP8_ENCODER_H_
diff --git a/media/cast/video_sender/mock_video_encoder_controller.h b/media/cast/video_sender/mock_video_encoder_controller.h
new file mode 100644
index 0000000..90b2abd
--- /dev/null
+++ b/media/cast/video_sender/mock_video_encoder_controller.h
@@ -0,0 +1,31 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+#define MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
+#include "media/cast/cast_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+class MockVideoEncoderController : public VideoEncoderController {
+ public:
+ MOCK_METHOD1(SetBitRate, void(int new_bit_rate));
+
+ MOCK_METHOD1(SkipNextFrame, void(bool skip_next_frame));
+
+ MOCK_METHOD0(GenerateKeyFrame, void());
+
+ MOCK_METHOD1(LatestFrameIdToReference, void(uint8 frame_id));
+
+ MOCK_CONST_METHOD0(NumberOfSkippedFrames, int());
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_MOCK_VIDEO_ENCODER_CONTROLLER_H_
+
diff --git a/media/cast/video_sender/video_encoder.cc b/media/cast/video_sender/video_encoder.cc
new file mode 100644
index 0000000..0b7202b
--- /dev/null
+++ b/media/cast/video_sender/video_encoder.cc
@@ -0,0 +1,112 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_encoder.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+VideoEncoder::VideoEncoder(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames)
+ : video_config_(video_config),
+ cast_thread_(cast_thread),
+ skip_next_frame_(false),
+ skip_count_(0) {
+ if (video_config.codec == kVp8) {
+ vp8_encoder_.reset(new Vp8Encoder(video_config, max_unacked_frames));
+ } else {
+ DCHECK(false) << "Invalid config"; // Codec not supported.
+ }
+
+ dynamic_config_.key_frame_requested = false;
+ dynamic_config_.latest_frame_id_to_reference = kStartFrameId;
+ dynamic_config_.bit_rate = video_config.start_bitrate;
+}
+
+VideoEncoder::~VideoEncoder() {}
+
+bool VideoEncoder::EncodeVideoFrame(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback) {
+ if (video_config_.codec != kVp8) return false;
+
+ if (skip_next_frame_) {
+ ++skip_count_;
+ VLOG(1) << "Skip encoding frame";
+ return false;
+ }
+
+ cast_thread_->PostTask(CastThread::VIDEO_ENCODER, FROM_HERE,
+ base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
+ video_frame, capture_time, dynamic_config_, frame_encoded_callback,
+ frame_release_callback));
+
+ dynamic_config_.key_frame_requested = false;
+ return true;
+}
+
+void VideoEncoder::EncodeVideoFrameEncoderThread(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const CodecDynamicConfig& dynamic_config,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback) {
+ if (dynamic_config.key_frame_requested) {
+ vp8_encoder_->RequestKeyFrame();
+ }
+ vp8_encoder_->LatestFrameIdToReference(
+ dynamic_config.latest_frame_id_to_reference);
+ vp8_encoder_->UpdateRates(dynamic_config.bit_rate);
+
+ scoped_ptr<EncodedVideoFrame> encoded_frame(new EncodedVideoFrame());
+ bool retval = vp8_encoder_->Encode(*video_frame, encoded_frame.get());
+
+ // We are done with the video frame release it.
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE, frame_release_callback);
+
+ if (!retval) {
+ VLOG(1) << "Encoding failed";
+ return;
+ }
+ if (encoded_frame->data.size() <= 0) {
+ VLOG(1) << "Encoding resulted in an empty frame";
+ return;
+ }
+ cast_thread_->PostTask(CastThread::MAIN, FROM_HERE,
+ base::Bind(frame_encoded_callback,
+ base::Passed(&encoded_frame), capture_time));
+}
+
+// Inform the encoder about the new target bit rate.
+void VideoEncoder::SetBitRate(int new_bit_rate) OVERRIDE {
+ dynamic_config_.bit_rate = new_bit_rate;
+}
+
+// Inform the encoder to not encode the next frame.
+void VideoEncoder::SkipNextFrame(bool skip_next_frame) OVERRIDE {
+ skip_next_frame_ = skip_next_frame;
+}
+
+// Inform the encoder to encode the next frame as a key frame.
+void VideoEncoder::GenerateKeyFrame() OVERRIDE {
+ dynamic_config_.key_frame_requested = true;
+}
+
+// Inform the encoder to only reference frames older than or equal to frame_id.
+void VideoEncoder::LatestFrameIdToReference(uint8 frame_id) OVERRIDE {
+ dynamic_config_.latest_frame_id_to_reference = frame_id;
+}
+
+int VideoEncoder::NumberOfSkippedFrames() const OVERRIDE {
+ return skip_count_;
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/video_sender/video_encoder.h
new file mode 100644
index 0000000..d3b261e
--- /dev/null
+++ b/media/cast/video_sender/video_encoder.h
@@ -0,0 +1,80 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
+
+namespace media {
+namespace cast {
+
+// This object is called external from the main cast thread and internally from
+// the video encoder thread.
+class VideoEncoder : public VideoEncoderController,
+ public base::RefCountedThreadSafe<VideoEncoder> {
+ public:
+ typedef base::Callback<void(scoped_ptr<EncodedVideoFrame>,
+ const base::TimeTicks&)> FrameEncodedCallback;
+
+ VideoEncoder(scoped_refptr<CastThread> cast_thread,
+ const VideoSenderConfig& video_config,
+ uint8 max_unacked_frames);
+
+ virtual ~VideoEncoder();
+
+ // Called from the main cast thread. This function posts the encode task to
+ // the video encoder thread.
+ // The video_frame must be valid until the closure callback is called.
+ // The closure callback is called from the video encoder thread as soon as
+ // the encoder is done with the frame; it does not mean that the encoded frame
+ // has been sent out.
+ // Once the encoded frame is ready the frame_encoded_callback is called.
+ bool EncodeVideoFrame(const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback);
+
+ protected:
+ struct CodecDynamicConfig {
+ bool key_frame_requested;
+ uint8 latest_frame_id_to_reference;
+ int bit_rate;
+ };
+
+ // The actual encode, called from the video encoder thread.
+ void EncodeVideoFrameEncoderThread(
+ const I420VideoFrame* video_frame,
+ const base::TimeTicks& capture_time,
+ const CodecDynamicConfig& dynamic_config,
+ const FrameEncodedCallback& frame_encoded_callback,
+ const base::Closure frame_release_callback);
+
+ // The following functions are called from the main cast thread.
+ virtual void SetBitRate(int new_bit_rate) OVERRIDE;
+ virtual void SkipNextFrame(bool skip_next_frame) OVERRIDE;
+ virtual void GenerateKeyFrame() OVERRIDE;
+ virtual void LatestFrameIdToReference(uint8 frame_id) OVERRIDE;
+ virtual int NumberOfSkippedFrames() const OVERRIDE;
+
+ private:
+ const VideoSenderConfig video_config_;
+ scoped_refptr<CastThread> cast_thread_;
+ scoped_ptr<Vp8Encoder> vp8_encoder_;
+ CodecDynamicConfig dynamic_config_;
+ bool skip_next_frame_;
+ int skip_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoEncoder);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_ENCODER_H_
diff --git a/media/cast/video_sender/video_encoder_unittest.cc b/media/cast/video_sender/video_encoder_unittest.cc
new file mode 100644
index 0000000..c3c682d
--- /dev/null
+++ b/media/cast/video_sender/video_encoder_unittest.cc
@@ -0,0 +1,282 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/video_sender/video_encoder.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+using base::RunLoop;
+using base::MessageLoopProxy;
+using base::Thread;
+using testing::_;
+
+static void ReleaseFrame(const I420VideoFrame* frame) {
+ // Empty because this test sends in the same frame each time.
+};
+
+class TestVideoEncoderCallback :
+ public base::RefCountedThreadSafe<TestVideoEncoderCallback> {
+ public:
+ TestVideoEncoderCallback() {}
+
+ void SetExpectedResult(bool expected_key_frame,
+ uint8 expected_frame_id,
+ uint8 expected_last_referenced_frame_id,
+ const base::TimeTicks& expected_capture_time) {
+ expected_key_frame_ = expected_key_frame;
+ expected_frame_id_ = expected_frame_id;
+ expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
+ expected_capture_time_ = expected_capture_time;
+ }
+
+ void DeliverEncodedVideoFrame(scoped_ptr<EncodedVideoFrame> encoded_frame,
+ const base::TimeTicks& capture_time) {
+ EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
+ EXPECT_EQ(expected_last_referenced_frame_id_,
+ encoded_frame->last_referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, capture_time);
+ }
+
+ private:
+ bool expected_key_frame_;
+ uint8 expected_frame_id_;
+ uint8 expected_last_referenced_frame_id_;
+ base::TimeTicks expected_capture_time_;
+};
+
+class VideoEncoderTest : public ::testing::Test {
+ public:
+
+ protected:
+ VideoEncoderTest()
+ : pixels_(320 * 240, 123),
+ test_video_encoder_callback_(new TestVideoEncoderCallback()) {
+ video_config_.sender_ssrc = 1;
+ video_config_.incoming_feedback_ssrc = 2;
+ video_config_.rtp_payload_type = 127;
+ video_config_.use_external_encoder = false;
+ video_config_.width = 320;
+ video_config_.height = 240;
+ video_config_.max_bitrate = 5000000;
+ video_config_.min_bitrate = 1000000;
+ video_config_.start_bitrate = 2000000;
+ video_config_.max_qp = 56;
+ video_config_.min_qp = 0;
+ video_config_.max_frame_rate = 30;
+ video_config_.max_number_of_video_buffers_used = 3;
+ video_config_.codec = kVp8;
+ video_frame_.width = 320;
+ video_frame_.height = 240;
+ video_frame_.y_plane.stride = video_frame_.width;
+ video_frame_.y_plane.length = video_frame_.width;
+ video_frame_.y_plane.data = &(pixels_[0]);
+ video_frame_.u_plane.stride = video_frame_.width / 2;
+ video_frame_.u_plane.length = video_frame_.width / 2;
+ video_frame_.u_plane.data = &(pixels_[0]);
+ video_frame_.v_plane.stride = video_frame_.width / 2;
+ video_frame_.v_plane.length = video_frame_.width / 2;
+ video_frame_.v_plane.data = &(pixels_[0]);
+ }
+
+ ~VideoEncoderTest() {}
+
+ virtual void SetUp() {
+ cast_thread_ = new CastThread(MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current(),
+ MessageLoopProxy::current());
+ }
+
+ void Configure(uint8 max_unacked_frames) {
+ video_encoder_= new VideoEncoder(cast_thread_, video_config_,
+ max_unacked_frames);
+ video_encoder_controller_ = video_encoder_.get();
+ }
+
+ base::MessageLoop loop_;
+ std::vector<uint8> pixels_;
+ scoped_refptr<TestVideoEncoderCallback> test_video_encoder_callback_;
+ VideoSenderConfig video_config_;
+ scoped_refptr<VideoEncoder> video_encoder_;
+ VideoEncoderController* video_encoder_controller_;
+ I420VideoFrame video_frame_;
+
+ scoped_refptr<CastThread> cast_thread_;
+};
+
+// 30 fps with 3 outstanding buffers: each frame references the most recently
+// acked frame; once acks stop after frame 2, every later frame keeps
+// referencing frame 2.
+// SetExpectedResult args are presumably
+// (key_frame, frame_id, last_referenced_frame_id, capture_time) — the helper
+// is declared above this chunk; TODO confirm.
+TEST_F(VideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
+  Configure(3);
+
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+      test_video_encoder_callback_.get());
+
+  base::TimeTicks capture_time;
+  {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    // First frame must be a key frame (frame 0 referencing itself).
+    test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    // Ack frame 0; frame 1 should reference it.
+    video_encoder_controller_->LatestFrameIdToReference(0);
+    test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    video_encoder_controller_->LatestFrameIdToReference(1);
+    test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+
+  video_encoder_controller_->LatestFrameIdToReference(2);
+
+  // No further acks arrive: frames 3..5 all reference frame 2.
+  for (int i = 3; i < 6; ++i) {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+}
+
+// Same pattern as the 30 fps test but with 6 outstanding buffers: frames 1
+// and 2 both reference the key frame, then frames 3..8 all reference the
+// last acked frame (2) after acks stop.
+TEST_F(VideoEncoderTest, EncodePattern60fpsRunningOutOfAck) {
+  Configure(6);
+
+  base::TimeTicks capture_time;
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+      test_video_encoder_callback_.get());
+
+  {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    // Initial key frame.
+    test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(0);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(1);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    // Frame 2 still references frame 0 (not 1) at this buffer depth.
+    test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+
+  video_encoder_controller_->LatestFrameIdToReference(2);
+
+  // Acks stop: frames 3..8 all reference the last acked frame (2).
+  for (int i = 3; i < 9; ++i) {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame( &video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+}
+
+// 60 fps with a 200 ms ack delay (12 outstanding buffers): frames 1..4 all
+// reference the key frame while acks lag, then frames 5..16 reference the
+// last acked frame (4) once acks stop.
+TEST_F(VideoEncoderTest, EncodePattern60fps200msDelayRunningOutOfAck) {
+  Configure(12);
+
+  base::TimeTicks capture_time;
+  VideoEncoder::FrameEncodedCallback frame_encoded_callback =
+      base::Bind(&TestVideoEncoderCallback::DeliverEncodedVideoFrame,
+      test_video_encoder_callback_.get());
+
+  {
+    RunLoop run_loop;
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    // Initial key frame.
+    test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(0);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(1);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(2);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+  {
+    RunLoop run_loop;
+    video_encoder_controller_->LatestFrameIdToReference(3);
+    capture_time += base::TimeDelta::FromMilliseconds(33);
+    test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+
+  video_encoder_controller_->LatestFrameIdToReference(4);
+
+  // NOTE(review): unlike every other loop in this file, capture_time is NOT
+  // advanced inside this loop, so frames 5..16 all carry the frame-4
+  // timestamp — looks like an oversight; TODO(pwestin) confirm intentional.
+  for (int i = 5; i < 17; ++i) {
+    RunLoop run_loop;
+    test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
+    EXPECT_TRUE(video_encoder_->EncodeVideoFrame(&video_frame_, capture_time,
+        frame_encoded_callback, base::Bind(ReleaseFrame, &video_frame_)));
+    run_loop.RunUntilIdle();
+  }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
new file mode 100644
index 0000000..83f3397
--- /dev/null
+++ b/media/cast/video_sender/video_sender.cc
@@ -0,0 +1,344 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/video_sender/video_sender.h"
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+#include "media/cast/cast_defines.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/video_encoder.h"
+
+namespace media {
+namespace cast {
+
+const int64 kMinSchedulingDelayMs = 1;
+
+// Adapter that receives RTCP feedback callbacks and forwards the ones the
+// video sender acts on (intra-frame requests and Cast feedback) to the
+// owning VideoSender. Report/RPSI/REMB/NACK callbacks are ignored or
+// not yet implemented. Non-owning back-pointer; the VideoSender owns this
+// object, so the pointer outlives it.
+class LocalRtcpVideoSenderFeedback : public RtcpSenderFeedback {
+ public:
+  explicit LocalRtcpVideoSenderFeedback(VideoSender* video_sender)
+      : video_sender_(video_sender) {
+  }
+
+  virtual void OnReceivedSendReportRequest() OVERRIDE {}
+
+  virtual void OnReceivedReportBlock(
+      const RtcpReportBlock& report_block) OVERRIDE {}
+
+  virtual void OnReceivedRpsi(uint8 payload_type,
+                              uint64 picture_id) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedRemb(uint32 bitrate) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedNackRequest(
+      const std::list<uint16>& nack_sequence_numbers) OVERRIDE {
+    NOTIMPLEMENTED();
+  }
+
+  virtual void OnReceivedIntraFrameRequest() OVERRIDE {
+    video_sender_->OnReceivedIntraFrameRequest();
+  }
+
+  virtual void OnReceivedCastFeedback(
+      const RtcpCastMessage& cast_feedback) OVERRIDE {
+    video_sender_->OnReceivedCastFeedback(cast_feedback);
+  }
+
+ private:
+  VideoSender* video_sender_;  // Not owned.
+};
+
+// Adapter exposing the RTP sender's statistics through the
+// RtpSenderStatistics interface the RTCP module consumes; used to populate
+// RTCP sender reports. Non-owning pointer to the VideoSender's RtpSender.
+class LocalRtpVideoSenderStatistics : public RtpSenderStatistics {
+ public:
+  explicit LocalRtpVideoSenderStatistics(RtpSender* rtp_sender)
+      : rtp_sender_(rtp_sender) {
+  }
+
+  virtual void GetStatistics(const base::TimeTicks& now,
+                             RtcpSenderInfo* sender_info) OVERRIDE {
+    rtp_sender_->RtpStatistics(now, sender_info);
+  }
+
+ private:
+  RtpSender* rtp_sender_;  // Not owned.
+};
+
+// Wires up RTP sending, RTCP, congestion control and (unless an external
+// encoder controller is supplied via video_config.use_external_encoder) an
+// internal VideoEncoder, then kicks off the three periodic checks (RTCP
+// report, resend, skipped frames) as delayed tasks on the main cast thread.
+VideoSender::VideoSender(
+    scoped_refptr<CastThread> cast_thread,
+    const VideoSenderConfig& video_config,
+    VideoEncoderController* const video_encoder_controller,
+    PacedPacketSender* const paced_packet_sender)
+    : incoming_feedback_ssrc_(video_config.incoming_feedback_ssrc),
+      rtp_max_delay_(
+          base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
+      max_frame_rate_(video_config.max_frame_rate),
+      cast_thread_(cast_thread),
+      rtcp_feedback_(new LocalRtcpVideoSenderFeedback(this)),
+      // First RtpSender argument is the audio config — NULL for a video-only
+      // sender (presumably; the RtpSender ctor is in another file — confirm).
+      rtp_sender_(new RtpSender(NULL, &video_config, paced_packet_sender)),
+      last_acked_frame_id_(-1),
+      last_sent_frame_id_(-1),
+      last_sent_key_frame_id_(-1),
+      duplicate_ack_(0),
+      last_skip_count_(0),
+      congestion_control_(video_config.congestion_control_back_off,
+                          video_config.max_bitrate,
+                          video_config.min_bitrate,
+                          video_config.start_bitrate),
+      clock_(&default_tick_clock_),
+      weak_factory_(this) {
+  // Max frames in flight = allowed RTP delay expressed in frame periods.
+  // NOTE(review): prefer DCHECK_GT for a better failure message.
+  max_unacked_frames_ = static_cast<uint8>(video_config.rtp_max_delay_ms *
+      video_config.max_frame_rate / 1000);
+  DCHECK(max_unacked_frames_ > 0) << "Invalid argument";
+
+  rtp_video_sender_statistics_.reset(
+      new LocalRtpVideoSenderStatistics(rtp_sender_.get()));
+
+  if (video_config.use_external_encoder) {
+    // Caller supplies and controls the encoder; we only keep the controller.
+    DCHECK(video_encoder_controller) << "Invalid argument";
+    video_encoder_controller_ = video_encoder_controller;
+  } else {
+    // Internal encoder doubles as its own controller.
+    video_encoder_ = new VideoEncoder(cast_thread, video_config,
+        max_unacked_frames_);
+    video_encoder_controller_ = video_encoder_.get();
+  }
+  // NULL here is the receiver-statistics interface, unused on the sender
+  // side; 'true' marks this endpoint as sending media.
+  rtcp_.reset(new Rtcp(
+      rtcp_feedback_.get(),
+      paced_packet_sender,
+      rtp_video_sender_statistics_.get(),
+      NULL,
+      video_config.rtcp_mode,
+      base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+      true,
+      video_config.sender_ssrc,
+      video_config.rtcp_c_name));
+
+  rtcp_->SetRemoteSSRC(video_config.incoming_feedback_ssrc);
+  ScheduleNextRtcpReport();
+  ScheduleNextResendCheck();
+  ScheduleNextSkippedFramesCheck();
+}
+
+VideoSender::~VideoSender() {}
+
+// Hands a raw frame to the internal encoder; only valid when this sender
+// was constructed with the built-in encoder. |callback| releases the frame
+// once the encoder is done with it (run on the encoder thread). The encoded
+// result is delivered back via SendEncodedVideoFrameMainThread through a
+// weak pointer, so delivery is dropped if this sender is destroyed first.
+void VideoSender::InsertRawVideoFrame(
+    const I420VideoFrame* video_frame,
+    const base::TimeTicks& capture_time,
+    const base::Closure callback) {
+  DCHECK(video_encoder_.get()) << "Invalid state";
+
+  if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
+      base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,
+          weak_factory_.GetWeakPtr()), callback)) {
+    // Encoder rejected the frame (e.g. busy/skipping); frame is dropped.
+    VLOG(1) << "Failed to InsertRawVideoFrame";
+  }
+}
+
+// Accepts an already-encoded frame from an external encoder; only valid in
+// external-encoder mode. Sends synchronously, then runs |callback| so the
+// caller can release the frame.
+void VideoSender::InsertCodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+                                        const base::TimeTicks& capture_time,
+                                        const base::Closure callback) {
+  DCHECK(!video_encoder_.get()) << "Invalid state";
+  DCHECK(encoded_frame) << "Invalid argument";
+
+  SendEncodedVideoFrame(encoded_frame, capture_time);
+  callback.Run();
+}
+
+// Main-thread trampoline for frames produced by the internal encoder; owns
+// the frame for the duration of the send.
+void VideoSender::SendEncodedVideoFrameMainThread(
+    scoped_ptr<EncodedVideoFrame> video_frame,
+    const base::TimeTicks& capture_time) {
+  SendEncodedVideoFrame(video_frame.get(), capture_time);
+}
+
+// Packetizes and sends one encoded frame, then updates the send bookkeeping
+// (last send time, last sent frame/key-frame ids, frames-in-flight gate).
+void VideoSender::SendEncodedVideoFrame(const EncodedVideoFrame* encoded_frame,
+                                        const base::TimeTicks& capture_time) {
+  last_send_time_ = clock_->NowTicks();
+  rtp_sender_->IncomingEncodedVideoFrame(encoded_frame, capture_time);
+  if (encoded_frame->key_frame) {
+    last_sent_key_frame_id_ = encoded_frame->frame_id;
+  }
+  last_sent_frame_id_ = encoded_frame->frame_id;
+  UpdateFramesInFlight();
+}
+
+// Remote peer asked for a key frame. If the last key frame is still recent
+// (fewer than max_unacked_frames_ - 1 frames sent since it, computed with
+// uint8 wrap-around), the request is ignored to avoid key-frame storms;
+// otherwise request one from the encoder and reset the ack/send state so
+// the stream restarts from the new key frame.
+void VideoSender::OnReceivedIntraFrameRequest() {
+  if (last_sent_key_frame_id_ != -1) {
+    uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+        static_cast<uint8>(last_sent_key_frame_id_);
+    if (frames_in_flight < (max_unacked_frames_ - 1)) return;
+  }
+  video_encoder_controller_->GenerateKeyFrame();
+  last_acked_frame_id_ = -1;
+  last_sent_frame_id_ = -1;
+}
+
+// Feeds a received RTCP packet to the RTCP parser; feedback arrives back
+// through LocalRtcpVideoSenderFeedback. Main cast thread only.
+void VideoSender::IncomingRtcpPacket(const uint8* packet, int length) {
+  rtcp_->IncomingRtcpPacket(packet, length);
+}
+
+// Posts a delayed task for the next RTCP report, clamped to at least
+// kMinSchedulingDelayMs so we never post with a zero/negative delay.
+void VideoSender::ScheduleNextRtcpReport() {
+  base::TimeDelta time_to_next =
+      rtcp_->TimeToSendNextRtcpReport() - clock_->NowTicks();
+
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::SendRtcpReport, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+// Sends one RTCP report addressed to the feedback SSRC and re-arms the timer.
+void VideoSender::SendRtcpReport() {
+  rtcp_->SendRtcpReport(incoming_feedback_ssrc_);
+  ScheduleNextRtcpReport();
+}
+
+// Arms the resend watchdog: fires rtp_max_delay_ after the last send (or
+// after rtp_max_delay_ if nothing has been sent yet), clamped to the
+// minimum scheduling delay.
+void VideoSender::ScheduleNextResendCheck() {
+  base::TimeDelta time_to_next;
+  if (last_send_time_.is_null()) {
+    time_to_next = rtp_max_delay_;
+  } else {
+    time_to_next = last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+  }
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::ResendCheck, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+// Watchdog body: if we have sent frames but heard nothing back, either
+// restart with a key frame (never acked at all) or speculatively resend the
+// oldest unacked frame. Always re-arms itself.
+void VideoSender::ResendCheck() {
+  if (!last_send_time_.is_null() && last_sent_frame_id_ != -1) {
+    // NOTE(review): time_to_next is computed but unused here.
+    base::TimeDelta time_to_next =
+        last_send_time_ - clock_->NowTicks() + rtp_max_delay_;
+
+    if (last_acked_frame_id_ == -1) {
+      // We have not received any ack, send a key frame.
+      // (Re-assigning last_acked_frame_id_ = -1 is redundant in this branch.)
+      video_encoder_controller_->GenerateKeyFrame();
+      last_acked_frame_id_ = -1;
+      last_sent_frame_id_ = -1;
+      UpdateFramesInFlight();
+    } else {
+      ResendFrame(static_cast<uint8>(last_acked_frame_id_ + 1));
+    }
+  }
+  ScheduleNextResendCheck();
+}
+
+// Arms the periodic skipped-frames monitor (period kSkippedFramesCheckPeriodkMs,
+// clamped to the minimum scheduling delay).
+void VideoSender::ScheduleNextSkippedFramesCheck() {
+  base::TimeDelta time_to_next;
+  if (last_checked_skip_count_time_.is_null()) {
+    time_to_next =
+        base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+  } else {
+    time_to_next = last_checked_skip_count_time_ - clock_->NowTicks() +
+        base::TimeDelta::FromMilliseconds(kSkippedFramesCheckPeriodkMs);
+  }
+  time_to_next = std::max(time_to_next,
+      base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
+  cast_thread_->PostDelayedTask(CastThread::MAIN, FROM_HERE,
+      base::Bind(&VideoSender::SkippedFramesCheck, weak_factory_.GetWeakPtr()),
+      time_to_next);
+}
+
+// Compares the encoder's cumulative skip count against the previous check;
+// a rate above kSkippedFramesThreshold * max_frame_rate_ is the signal the
+// TODO intends to surface to the application. Always re-arms itself.
+void VideoSender::SkippedFramesCheck() {
+  int skip_count = video_encoder_controller_->NumberOfSkippedFrames();
+  if (skip_count - last_skip_count_ >
+      kSkippedFramesThreshold * max_frame_rate_) {
+    // TODO(pwestin): Propagate this up to the application.
+  }
+  last_skip_count_ = skip_count;
+  last_checked_skip_count_time_ = clock_->NowTicks();
+  ScheduleNextSkippedFramesCheck();
+}
+
+// Handles a Cast feedback message: on a clean ack, updates the encoder's
+// reference frame, feeds congestion control, and resends the next frame
+// after every third duplicate ack; on reported loss, resends the missing
+// packets and backs off the bitrate. Ends by recording the acked frame id.
+void VideoSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
+  base::TimeDelta rtt;
+  base::TimeDelta avg_rtt;
+  base::TimeDelta min_rtt;
+  base::TimeDelta max_rtt;
+
+  if (rtcp_->Rtt(&rtt, &avg_rtt, &min_rtt, &max_rtt)) {
+    // Don't use a RTT lower than our average.
+    rtt = std::max(rtt, avg_rtt);
+  } else {
+    // We have no measured value use default.
+    rtt = base::TimeDelta::FromMilliseconds(kStartRttMs);
+  }
+  if (cast_feedback.missing_frames_and_packets_.empty()) {
+    // No lost packets.
+    int resend_frame = -1;
+    if (last_sent_frame_id_ == -1) return;
+
+    video_encoder_controller_->LatestFrameIdToReference(
+        cast_feedback.ack_frame_id_);
+
+    // In-order ack (exactly one past the last ack, with uint8 wrap):
+    // reward congestion control.
+    if (static_cast<uint8>(last_acked_frame_id_ + 1) ==
+        cast_feedback.ack_frame_id_) {
+      uint32 new_bitrate = 0;
+      if (congestion_control_.OnAck(rtt, &new_bitrate)) {
+        video_encoder_controller_->SetBitRate(new_bitrate);
+      }
+    }
+    if (last_acked_frame_id_ == cast_feedback.ack_frame_id_ &&
+        // We only count duplicate ACKs when we have sent newer frames.
+        IsNewerFrameId(last_sent_frame_id_, last_acked_frame_id_)) {
+      duplicate_ack_++;
+    } else {
+      duplicate_ack_ = 0;
+    }
+    // Trigger on the 3rd, 6th, 9th... duplicate ack (counts 2, 5, 8, ...).
+    if (duplicate_ack_ >= 2 && duplicate_ack_ % 3 == 2) {
+      // Resend last ACK + 1 frame.
+      resend_frame = static_cast<uint8>(last_acked_frame_id_ + 1);
+    }
+    if (resend_frame != -1) {
+      ResendFrame(static_cast<uint8>(resend_frame));
+    }
+  } else {
+    // Receiver reported loss: resend exactly what it asked for and let
+    // congestion control back off.
+    rtp_sender_->ResendPackets(cast_feedback.missing_frames_and_packets_);
+    last_send_time_ = clock_->NowTicks();
+
+    uint32 new_bitrate = 0;
+    if (congestion_control_.OnNack(rtt, &new_bitrate)) {
+      video_encoder_controller_->SetBitRate(new_bitrate);
+    }
+  }
+  ReceivedAck(cast_feedback.ack_frame_id_);
+}
+
+// Records the newly acked frame and re-evaluates the frames-in-flight gate.
+void VideoSender::ReceivedAck(uint8 acked_frame_id) {
+  last_acked_frame_id_ = acked_frame_id;
+  UpdateFramesInFlight();
+}
+
+// Tells the encoder whether to skip the next frame: skip while the number
+// of unacked frames (uint8 wrap-around difference) has reached the
+// configured maximum, otherwise allow encoding to proceed.
+void VideoSender::UpdateFramesInFlight() {
+  if (last_sent_frame_id_ != -1) {
+    uint8 frames_in_flight = static_cast<uint8>(last_sent_frame_id_) -
+        static_cast<uint8>(last_acked_frame_id_);
+    if (frames_in_flight >= max_unacked_frames_) {
+      video_encoder_controller_->SkipNextFrame(true);
+      return;
+    }
+  }
+  video_encoder_controller_->SkipNextFrame(false);
+}
+
+// Resends an entire frame: an empty PacketIdSet for the frame id means
+// "all packets of this frame" to the RTP sender.
+void VideoSender::ResendFrame(uint8 resend_frame_id) {
+  MissingFramesAndPacketsMap missing_frames_and_packets;
+  PacketIdSet missing;
+  missing_frames_and_packets.insert(std::make_pair(resend_frame_id, missing));
+  rtp_sender_->ResendPackets(missing_frames_and_packets);
+  last_send_time_ = clock_->NowTicks();
+}
+
+} // namespace cast
+} // namespace media
diff --git a/media/cast/video_sender/video_sender.gypi b/media/cast/video_sender/video_sender.gypi
new file mode 100644
index 0000000..9499066
--- /dev/null
+++ b/media/cast/video_sender/video_sender.gypi
@@ -0,0 +1,31 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  # Pulls in the VP8 encoder target so 'video_sender' can depend on it below.
+  'includes': [
+    'codecs/vp8/vp8_encoder.gypi',
+  ],
+  'targets': [
+    {
+      # Static library with the video encoder wrapper and the video sender.
+      'target_name': 'video_sender',
+      'type': 'static_library',
+      'include_dirs': [
+        '<(DEPTH)/',
+        '<(DEPTH)/third_party/',
+      ],
+      'sources': [
+        'video_encoder.h',
+        'video_encoder.cc',
+        'video_sender.h',
+        'video_sender.cc',
+      ], # source
+      'dependencies': [
+        '<(DEPTH)/media/cast/rtcp/rtcp.gyp:*',
+        '<(DEPTH)/media/cast/rtp_sender/rtp_sender.gyp:*',
+        'congestion_control',
+        'cast_vp8_encoder',
+      ],
+    },
+  ],
+}
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/video_sender/video_sender.h
new file mode 100644
index 0000000..ff45326
--- /dev/null
+++ b/media/cast/video_sender/video_sender.h
@@ -0,0 +1,144 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+#define MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/congestion_control/congestion_control.h"
+#include "media/cast/rtcp/rtcp.h"
+#include "media/cast/rtp_sender/rtp_sender.h"
+
+namespace media {
+namespace cast {
+
+class VideoEncoder;
+class LocalRtcpVideoSenderFeedback;
+class LocalRtpVideoSenderStatistics;
+class LocalVideoEncoderCallback;
+class PacedPacketSender;
+
+// Not thread safe. Only called from the main cast thread.
+// This class owns all objects related to sending video, objects that create RTP
+// packets, congestion control, video encoder, parsing and sending of
+// RTCP packets.
+// Additionally it posts a bunch of delayed tasks to the main thread for various
+// timeouts.
+class VideoSender : public base::NonThreadSafe,
+                    public base::SupportsWeakPtr<VideoSender> {
+ public:
+  // |video_encoder_controller| is required (and used) only when
+  // video_config.use_external_encoder is set; otherwise an internal
+  // VideoEncoder is created and acts as its own controller.
+  VideoSender(scoped_refptr<CastThread> cast_thread,
+              const VideoSenderConfig& video_config,
+              VideoEncoderController* const video_encoder_controller,
+              PacedPacketSender* const paced_packet_sender);
+
+  virtual ~VideoSender();
+
+  // The video_frame must be valid until the closure callback is called.
+  // The closure callback is called from the video encoder thread as soon as
+  // the encoder is done with the frame; it does not mean that the encoded frame
+  // has been sent out.
+  void InsertRawVideoFrame(
+      const I420VideoFrame* video_frame,
+      const base::TimeTicks& capture_time,
+      const base::Closure callback);
+
+  // The video_frame must be valid until the closure callback is called.
+  // The closure callback is called from the main thread as soon as
+  // the cast sender is done with the frame; it does not mean that the encoded
+  // frame has been sent out.
+  void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
+                             const base::TimeTicks& capture_time,
+                             const base::Closure callback);
+
+  // Only called from the main cast thread.
+  void IncomingRtcpPacket(const uint8* packet, int length);
+
+  // Test hook: replaces the tick clock in this object and in all owned
+  // sub-components so tests can drive time deterministically.
+  void set_clock(base::TickClock* clock) {
+    clock_ = clock;
+    congestion_control_.set_clock(clock);
+    rtcp_->set_clock(clock);
+    rtp_sender_->set_clock(clock);
+  }
+
+ protected:
+  // Protected for testability.
+  void OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback);
+
+ private:
+  friend class LocalRtcpVideoSenderFeedback;
+
+  // Schedule when we should send the next RTCP report,
+  // via a PostDelayedTask to the main cast thread.
+  void ScheduleNextRtcpReport();
+  void SendRtcpReport();
+
+  // Schedule when we should check that we have received an acknowledgment, or a
+  // loss report from our remote peer. If we have not heard back from our remote
+  // peer we speculatively resend our oldest unacknowledged frame (the whole
+  // frame). Note for this to happen we need to lose all pending packets (in
+  // normal operation 3 full frames), hence this is the last resort to prevent
+  // us getting stuck after a long outage.
+  void ScheduleNextResendCheck();
+  void ResendCheck();
+
+  // Monitor how many frames that are silently dropped by the video sender
+  // per time unit.
+  void ScheduleNextSkippedFramesCheck();
+  void SkippedFramesCheck();
+
+  void SendEncodedVideoFrame(const EncodedVideoFrame* video_frame,
+                             const base::TimeTicks& capture_time);
+  void OnReceivedIntraFrameRequest();
+  void ResendFrame(uint8 resend_frame_id);
+  void ReceivedAck(uint8 acked_frame_id);
+  void UpdateFramesInFlight();
+
+  void SendEncodedVideoFrameMainThread(
+      scoped_ptr<EncodedVideoFrame> video_frame,
+      const base::TimeTicks& capture_time);
+
+  const uint32 incoming_feedback_ssrc_;
+  const base::TimeDelta rtp_max_delay_;
+  const int max_frame_rate_;
+
+  scoped_refptr<CastThread> cast_thread_;
+  scoped_ptr<LocalRtcpVideoSenderFeedback> rtcp_feedback_;
+  scoped_ptr<LocalRtpVideoSenderStatistics> rtp_video_sender_statistics_;
+  scoped_refptr<VideoEncoder> video_encoder_;  // NULL with external encoder.
+  scoped_ptr<Rtcp> rtcp_;
+  scoped_ptr<RtpSender> rtp_sender_;
+  VideoEncoderController* video_encoder_controller_;  // Not owned if external.
+  uint8 max_unacked_frames_;
+  // Frame-id state: -1 means "none yet"; otherwise a uint8 frame id.
+  int last_acked_frame_id_;
+  int last_sent_frame_id_;
+  int last_sent_key_frame_id_;
+  int duplicate_ack_;
+  base::TimeTicks last_send_time_;
+  base::TimeTicks last_checked_skip_count_time_;
+  int last_skip_count_;
+  CongestionControl congestion_control_;
+
+  base::DefaultTickClock default_tick_clock_;
+  base::TickClock* clock_;  // Points at default_tick_clock_ unless overridden.
+
+  base::WeakPtrFactory<VideoSender> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(VideoSender);
+};
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_VIDEO_SENDER_VIDEO_SENDER_H_
+
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
new file mode 100644
index 0000000..4bfe33e
--- /dev/null
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "media/cast/cast_thread.h"
+#include "media/cast/pacing/mock_paced_packet_sender.h"
+#include "media/cast/pacing/paced_sender.h"
+#include "media/cast/video_sender/mock_video_encoder_controller.h"
+#include "media/cast/video_sender/video_sender.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+namespace cast {
+
+static const int64 kStartMillisecond = 123456789;
+
+using base::RunLoop;
+using testing::_;
+
+// Test subclass that re-exports the protected OnReceivedCastFeedback() so
+// tests can inject Cast feedback directly.
+class PeerVideoSender : public VideoSender {
+ public:
+  PeerVideoSender(scoped_refptr<CastThread> cast_thread,
+                  const VideoSenderConfig& video_config,
+                  VideoEncoderController* const video_encoder_controller,
+                  PacedPacketSender* const paced_packet_sender)
+      : VideoSender(cast_thread, video_config, video_encoder_controller,
+                    paced_packet_sender) {
+  }
+  using VideoSender::OnReceivedCastFeedback;
+};
+
+// Frees a frame allocated by VideoSenderTest::AllocateNewVideoFrame().
+// NOTE(review): the stray ';' after each function body is harmless but
+// trips -Wextra-semi.
+static void ReleaseVideoFrame(const I420VideoFrame* frame) {
+  delete [] frame->y_plane.data;
+  delete [] frame->u_plane.data;
+  delete [] frame->v_plane.data;
+  delete frame;
+};
+
+// Release callback for stack-allocated encoded frames: nothing to free.
+static void ReleaseEncodedFrame(const EncodedVideoFrame* frame) {
+  // Do nothing.
+};
+
+// Fixture wiring a PeerVideoSender to mock transport/encoder-controller
+// objects, with all CastThread task runners pointed at the test's single
+// message loop and a SimpleTestTickClock driving time.
+class VideoSenderTest : public ::testing::Test {
+ protected:
+  VideoSenderTest() {
+    // Start the clock at a non-zero time so null-TimeTicks checks behave.
+    testing_clock_.Advance(
+        base::TimeDelta::FromMilliseconds(kStartMillisecond));
+  }
+
+  ~VideoSenderTest() {}
+
+  // Builds the sender; |external| selects the mock external encoder
+  // controller instead of the built-in VP8 encoder.
+  void InitEncoder(bool external) {
+    VideoSenderConfig video_config;
+    video_config.sender_ssrc = 1;
+    video_config.incoming_feedback_ssrc = 2;
+    video_config.rtp_payload_type = 127;
+    video_config.use_external_encoder = external;
+    video_config.width = 320;
+    video_config.height = 240;
+    video_config.max_bitrate = 5000000;
+    video_config.min_bitrate = 1000000;
+    video_config.start_bitrate = 1000000;
+    video_config.max_qp = 56;
+    video_config.min_qp = 0;
+    video_config.max_frame_rate = 30;
+    video_config.max_number_of_video_buffers_used = 3;
+    video_config.codec = kVp8;
+
+    if (external) {
+      video_sender_.reset(new PeerVideoSender(cast_thread_, video_config,
+          &mock_video_encoder_controller_, &mock_transport_));
+    } else {
+      video_sender_.reset(new PeerVideoSender(cast_thread_, video_config, NULL,
+          &mock_transport_));
+    }
+    video_sender_->set_clock(&testing_clock_);
+  }
+
+  virtual void SetUp() {
+    // All five cast threads share the test's message loop.
+    cast_thread_ = new CastThread(MessageLoopProxy::current(),
+                                  MessageLoopProxy::current(),
+                                  MessageLoopProxy::current(),
+                                  MessageLoopProxy::current(),
+                                  MessageLoopProxy::current());
+  }
+
+  // Heap-allocates a 320x240 I420 frame filled with a constant value;
+  // caller releases it via ReleaseVideoFrame().
+  I420VideoFrame* AllocateNewVideoFrame() {
+    I420VideoFrame* video_frame = new I420VideoFrame();
+    video_frame->width = 320;
+    video_frame->height = 240;
+
+    video_frame->y_plane.stride = video_frame->width;
+    video_frame->y_plane.length = video_frame->width;
+    video_frame->y_plane.data =
+        new uint8[video_frame->width * video_frame->height];
+    memset(video_frame->y_plane.data, 123,
+        video_frame->width * video_frame->height);
+    video_frame->u_plane.stride = video_frame->width / 2;
+    video_frame->u_plane.length = video_frame->width / 2;
+    video_frame->u_plane.data =
+        new uint8[video_frame->width * video_frame->height / 4];
+    memset(video_frame->u_plane.data, 123,
+        video_frame->width * video_frame->height / 4);
+    video_frame->v_plane.stride = video_frame->width / 2;
+    video_frame->v_plane.length = video_frame->width / 2;
+    video_frame->v_plane.data =
+        new uint8[video_frame->width * video_frame->height / 4];
+    memset(video_frame->v_plane.data, 123,
+        video_frame->width * video_frame->height / 4);
+    return video_frame;
+  }
+
+  base::MessageLoop loop_;
+  MockVideoEncoderController mock_video_encoder_controller_;
+  base::SimpleTestTickClock testing_clock_;
+  MockPacedPacketSender mock_transport_;
+  scoped_ptr<PeerVideoSender> video_sender_;
+  scoped_refptr<CastThread> cast_thread_;
+};
+
+// One raw frame through the built-in encoder should produce exactly one
+// packet on the mock transport.
+TEST_F(VideoSenderTest, BuiltInEncoder) {
+  EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+
+  RunLoop run_loop;
+  InitEncoder(false);
+  I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+  base::TimeTicks capture_time;
+  video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+      base::Bind(&ReleaseVideoFrame, video_frame));
+
+  run_loop.RunUntilIdle();
+}
+
+// One pre-encoded frame in external-encoder mode: sent synchronously (no
+// run loop needed) and the controller is told not to skip the next frame.
+TEST_F(VideoSenderTest, ExternalEncoder) {
+  EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(1);
+  EXPECT_CALL(mock_video_encoder_controller_, SkipNextFrame(false)).Times(1);
+  InitEncoder(true);
+
+  EncodedVideoFrame video_frame;
+  base::TimeTicks capture_time;
+
+  video_frame.codec = kVp8;
+  video_frame.key_frame = true;
+  video_frame.frame_id = 0;
+  video_frame.last_referenced_frame_id = 0;
+  // 123 bytes of value 1000 truncated to char — arbitrary payload.
+  video_frame.data.insert(video_frame.data.begin(), 123, 1000);
+
+  video_sender_->InsertCodedVideoFrame(&video_frame, capture_time,
+      base::Bind(&ReleaseEncodedFrame, &video_frame));
+}
+
+// Advancing past 1.5x the default RTCP interval must trigger exactly one
+// RTCP packet on the transport.
+TEST_F(VideoSenderTest, RtcpTimer) {
+  EXPECT_CALL(mock_transport_, SendRtcpPacket(_)).Times(1);
+  RunLoop run_loop;
+  InitEncoder(false);
+
+  // Make sure that we send at least one RTCP packet.
+  base::TimeDelta max_rtcp_timeout =
+      base::TimeDelta::FromMilliseconds(1 + kDefaultRtcpIntervalMs * 3 / 2);
+  testing_clock_.Advance(max_rtcp_timeout);
+
+  // TODO(pwestin): haven't found a way to make the post delayed task to go
+  // faster than a real-time.
+  base::PlatformThread::Sleep(max_rtcp_timeout);
+  run_loop.RunUntilIdle();
+}
+
+// After a sent-but-never-acked second frame, letting more than the max RTP
+// delay elapse must trigger a speculative resend of that frame (one
+// ResendPacket on top of the two normal SendPacket calls).
+TEST_F(VideoSenderTest, ResendTimer) {
+  EXPECT_CALL(mock_transport_, SendPacket(_, _)).Times(2);
+  EXPECT_CALL(mock_transport_, ResendPacket(_, _)).Times(1);
+
+  RunLoop run_loop;
+  InitEncoder(false);
+
+  I420VideoFrame* video_frame = AllocateNewVideoFrame();
+
+  base::TimeTicks capture_time;
+  video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+      base::Bind(&ReleaseVideoFrame, video_frame));
+
+  run_loop.RunUntilIdle();
+
+  // ACK the key frame.
+  RtcpCastMessage cast_feedback(1);
+  cast_feedback.media_ssrc_ = 2;
+  cast_feedback.ack_frame_id_ = 0;
+  video_sender_->OnReceivedCastFeedback(cast_feedback);
+
+  // Second frame is sent but never acked.
+  video_frame = AllocateNewVideoFrame();
+  video_sender_->InsertRawVideoFrame(video_frame, capture_time,
+      base::Bind(&ReleaseVideoFrame, video_frame));
+
+  {
+    RunLoop run_loop;
+    run_loop.RunUntilIdle();
+  }
+
+  base::TimeDelta max_resend_timeout =
+      base::TimeDelta::FromMilliseconds(1 + kDefaultRtpMaxDelayMs);
+
+  // Make sure that we do a re-send.
+  testing_clock_.Advance(max_resend_timeout);
+
+  // TODO(pwestin): haven't found a way to make the post delayed task to go
+  // faster than a real-time.
+  base::PlatformThread::Sleep(max_resend_timeout);
+  {
+    RunLoop run_loop;
+    run_loop.RunUntilIdle();
+  }
+}
+
+} // namespace cast
+} // namespace media
+