path: root/media
author     miu@chromium.org <miu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-05-20 06:40:49 +0000
committer  miu@chromium.org <miu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-05-20 06:40:49 +0000
commit     b72e681f5bcf702e54e52b11bd89954eb098c9f3 (patch)
tree       b124b21e12592adbf0fa8ac3956d19f270464dbf /media
parent     c045994a5013011f05264de8a7b47d2a9e6fb1ab (diff)
[Cast] EncodedAudioFrame+EncodedVideoFrame+reference_time --> EncodedFrame
Replace the use of struct EncodedAudioFrame and struct EncodedVideoFrame with a unified struct EncodedFrame throughout src/media/cast. This allows for later merging of duplicated code throughout the stack.

The new struct drops the unused codec data member and adds a new data member, reference_time. codec is unnecessary because it is already known in every context where an EncodedFrame is produced or consumed. reference_time is added because virtually all interfaces that passed around an EncodedXXXFrame were also passing the frame's reference_time alongside it.

Testing: Updated and ran all cast_unittests, plus manual testing between multiple Cast sender and receiver implementations.

Review URL: https://codereview.chromium.org/288103002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@271594 0039d316-1c4b-4281-b951-d872f2087c98
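For orientation before reading the diff, here is a minimal sketch (not part of the commit) of how a sender-side encoder populates the unified struct. The field names come from the cast_transport_config.h hunk below; the helper function and its signature are hypothetical.

#include <string>

#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/cast/transport/cast_transport_config.h"

// Hypothetical helper, for illustration only: it mirrors what the updated
// audio_encoder.cc does once it has encoded one frame of audio.
scoped_ptr<media::cast::transport::EncodedFrame> MakeEncodedAudioFrame(
    uint32 frame_id,
    uint32 rtp_timestamp,
    const base::TimeTicks& capture_time,
    const std::string& payload) {
  scoped_ptr<media::cast::transport::EncodedFrame> frame(
      new media::cast::transport::EncodedFrame());
  // Audio frames are independently decodable, so they are marked KEY and
  // reference themselves (referenced_frame_id == frame_id).
  frame->dependency = media::cast::transport::EncodedFrame::KEY;
  frame->frame_id = frame_id;
  frame->referenced_frame_id = frame_id;
  frame->rtp_timestamp = rtp_timestamp;
  // reference_time replaces the base::TimeTicks argument that used to be
  // passed alongside every EncodedAudioFrame.
  frame->reference_time = capture_time;
  frame->data = payload;
  return frame.Pass();
}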
Diffstat (limited to 'media')
-rw-r--r--  media/cast/audio_receiver/audio_decoder.cc  |  19
-rw-r--r--  media/cast/audio_receiver/audio_decoder.h  |  2
-rw-r--r--  media/cast/audio_receiver/audio_decoder_unittest.cc  |  19
-rw-r--r--  media/cast/audio_receiver/audio_receiver.cc  |  20
-rw-r--r--  media/cast/audio_receiver/audio_receiver.h  |  11
-rw-r--r--  media/cast/audio_receiver/audio_receiver_unittest.cc  |  10
-rw-r--r--  media/cast/audio_sender/audio_encoder.cc  |  21
-rw-r--r--  media/cast/audio_sender/audio_encoder.h  |  4
-rw-r--r--  media/cast/audio_sender/audio_encoder_unittest.cc  |  12
-rw-r--r--  media/cast/audio_sender/audio_sender.cc  |  8
-rw-r--r--  media/cast/audio_sender/audio_sender.h  |  4
-rw-r--r--  media/cast/audio_sender/audio_sender_unittest.cc  |  6
-rw-r--r--  media/cast/cast_receiver.h  |  17
-rw-r--r--  media/cast/cast_receiver_impl.cc  |  4
-rw-r--r--  media/cast/framer/frame_buffer.cc  |  47
-rw-r--r--  media/cast/framer/frame_buffer.h  |  8
-rw-r--r--  media/cast/framer/frame_buffer_unittest.cc  |  20
-rw-r--r--  media/cast/framer/framer.cc  |  8
-rw-r--r--  media/cast/framer/framer.h  |  4
-rw-r--r--  media/cast/framer/framer_unittest.cc  |  65
-rw-r--r--  media/cast/test/encode_decode_test.cc  |  136
-rw-r--r--  media/cast/test/end2end_unittest.cc  |  9
-rw-r--r--  media/cast/transport/cast_transport_config.cc  |  12
-rw-r--r--  media/cast/transport/cast_transport_config.h  |  74
-rw-r--r--  media/cast/transport/cast_transport_sender.h  |  7
-rw-r--r--  media/cast/transport/cast_transport_sender_impl.cc  |  10
-rw-r--r--  media/cast/transport/cast_transport_sender_impl.h  |  9
-rw-r--r--  media/cast/transport/rtp_sender/mock_rtp_sender.h  |  35
-rw-r--r--  media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc  |  66
-rw-r--r--  media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h  |  17
-rw-r--r--  media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc  |  16
-rw-r--r--  media/cast/transport/rtp_sender/rtp_sender.cc  |  12
-rw-r--r--  media/cast/transport/rtp_sender/rtp_sender.h  |  8
-rw-r--r--  media/cast/transport/transport_audio_sender.cc  |  20
-rw-r--r--  media/cast/transport/transport_audio_sender.h  |  7
-rw-r--r--  media/cast/transport/transport_video_sender.cc  |  31
-rw-r--r--  media/cast/transport/transport_video_sender.h  |  7
-rw-r--r--  media/cast/video_receiver/codecs/vp8/vp8_decoder.h  |  45
-rw-r--r--  media/cast/video_receiver/video_decoder.cc  |  15
-rw-r--r--  media/cast/video_receiver/video_decoder.h  |  2
-rw-r--r--  media/cast/video_receiver/video_decoder_unittest.cc  |  11
-rw-r--r--  media/cast/video_receiver/video_receiver.cc  |  21
-rw-r--r--  media/cast/video_receiver/video_receiver.h  |  11
-rw-r--r--  media/cast/video_receiver/video_receiver_unittest.cc  |  8
-rw-r--r--  media/cast/video_sender/codecs/vp8/vp8_encoder.cc  |  41
-rw-r--r--  media/cast/video_sender/codecs/vp8/vp8_encoder.h  |  2
-rw-r--r--  media/cast/video_sender/external_video_encoder.cc  |  31
-rw-r--r--  media/cast/video_sender/external_video_encoder_unittest.cc  |  36
-rw-r--r--  media/cast/video_sender/fake_software_video_encoder.cc  |  19
-rw-r--r--  media/cast/video_sender/fake_software_video_encoder.h  |  2
-rw-r--r--  media/cast/video_sender/software_video_encoder.h  |  4
-rw-r--r--  media/cast/video_sender/video_encoder.h  |  4
-rw-r--r--  media/cast/video_sender/video_encoder_impl.cc  |  17
-rw-r--r--  media/cast/video_sender/video_encoder_impl.h  |  4
-rw-r--r--  media/cast/video_sender/video_encoder_impl_unittest.cc  |  53
-rw-r--r--  media/cast/video_sender/video_sender.cc  |  20
-rw-r--r--  media/cast/video_sender/video_sender.h  |  5
-rw-r--r--  media/cast/video_sender/video_sender_unittest.cc  |  19
58 files changed, 435 insertions, 720 deletions
diff --git a/media/cast/audio_receiver/audio_decoder.cc b/media/cast/audio_receiver/audio_decoder.cc
index 4e75473..b767971 100644
--- a/media/cast/audio_receiver/audio_decoder.cc
+++ b/media/cast/audio_receiver/audio_decoder.cc
@@ -9,7 +9,6 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
-#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "media/cast/cast_defines.h"
#include "third_party/opus/src/include/opus.h"
@@ -40,20 +39,10 @@ class AudioDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_AUDIO_INITIALIZED);
- scoped_ptr<AudioBus> decoded_audio;
- if (encoded_frame->codec != codec_) {
- NOTREACHED();
- cast_environment_->PostTask(CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback,
- base::Passed(&decoded_audio),
- false));
- }
-
COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
size_of_frame_id_types_do_not_match);
bool is_continuous = true;
@@ -68,8 +57,8 @@ class AudioDecoder::ImplBase
}
last_frame_id_ = encoded_frame->frame_id;
- decoded_audio = Decode(
- reinterpret_cast<uint8*>(string_as_array(&encoded_frame->data)),
+ scoped_ptr<AudioBus> decoded_audio = Decode(
+ encoded_frame->mutable_bytes(),
static_cast<int>(encoded_frame->data.size()));
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
@@ -239,7 +228,7 @@ CastInitializationStatus AudioDecoder::InitializationResult() const {
}
void AudioDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
diff --git a/media/cast/audio_receiver/audio_decoder.h b/media/cast/audio_receiver/audio_decoder.h
index 0e10eba..d624141 100644
--- a/media/cast/audio_receiver/audio_decoder.h
+++ b/media/cast/audio_receiver/audio_decoder.h
@@ -42,7 +42,7 @@ class AudioDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/audio_receiver/audio_decoder_unittest.cc b/media/cast/audio_receiver/audio_decoder_unittest.cc
index d32dbe1..ee7846c 100644
--- a/media/cast/audio_receiver/audio_decoder_unittest.cc
+++ b/media/cast/audio_receiver/audio_decoder_unittest.cc
@@ -4,7 +4,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/stl_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/sys_byteorder.h"
@@ -71,15 +70,16 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
total_audio_decoded_ = base::TimeDelta();
}
- // Called from the unit test thread to create another EncodedAudioFrame and
- // push it into the decoding pipeline.
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
void FeedMoreAudio(const base::TimeDelta& duration,
int num_dropped_frames) {
- // Prepare a simulated EncodedAudioFrame to feed into the AudioDecoder.
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
- encoded_frame->codec = GetParam().codec;
+ // Prepare a simulated EncodedFrame to feed into the AudioDecoder.
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = transport::EncodedFrame::KEY;
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
last_frame_id_ = encoded_frame->frame_id;
const scoped_ptr<AudioBus> audio_bus(
@@ -93,7 +93,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
if (GetParam().codec == transport::kPcm16) {
encoded_frame->data.resize(num_elements * sizeof(int16));
int16* const pcm_data =
- reinterpret_cast<int16*>(string_as_array(&encoded_frame->data));
+ reinterpret_cast<int16*>(encoded_frame->mutable_bytes());
for (size_t i = 0; i < interleaved.size(); ++i)
pcm_data[i] = static_cast<int16>(base::HostToNet16(interleaved[i]));
} else if (GetParam().codec == transport::kOpus) {
@@ -105,8 +105,7 @@ class AudioDecoderTest : public ::testing::TestWithParam<TestScenario> {
opus_encode(opus_encoder,
&interleaved.front(),
audio_bus->frames(),
- reinterpret_cast<unsigned char*>(
- string_as_array(&encoded_frame->data)),
+ encoded_frame->mutable_bytes(),
encoded_frame->data.size());
CHECK_GT(payload_size, 1);
encoded_frame->data.resize(payload_size);
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc
index 212419c..ab06c53 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/audio_receiver/audio_receiver.cc
@@ -114,15 +114,15 @@ void AudioReceiver::GetRawAudioFrame(
void AudioReceiver::DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
- callback.Run(make_scoped_ptr<AudioBus>(NULL), playout_time, false);
+ callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false);
return;
}
const uint32 frame_id = encoded_frame->frame_id;
const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
audio_decoder_->DecodeFrame(encoded_frame.Pass(),
base::Bind(&AudioReceiver::EmitRawAudioFrame,
cast_environment_,
@@ -153,8 +153,7 @@ void AudioReceiver::EmitRawAudioFrame(
callback.Run(audio_bus.Pass(), playout_time, is_continuous);
}
-void AudioReceiver::GetEncodedAudioFrame(
- const AudioFrameEncodedCallback& callback) {
+void AudioReceiver::GetEncodedAudioFrame(const FrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frame_request_queue_.push_back(callback);
EmitAvailableEncodedFrames();
@@ -167,8 +166,8 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame(
- new transport::EncodedAudioFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
bool is_consecutively_next_frame = false;
if (!framer_.GetEncodedAudioFrame(encoded_frame.get(),
&is_consecutively_next_frame)) {
@@ -216,14 +215,13 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
encoded_frame->data.swap(decrypted_audio_data);
}
- // At this point, we have a decrypted EncodedAudioFrame ready to be emitted.
- encoded_frame->codec = codec_;
+ // At this point, we have a decrypted EncodedFrame ready to be emitted.
+ encoded_frame->reference_time = playout_time;
framer_.ReleaseFrame(encoded_frame->frame_id);
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(frame_request_queue_.front(),
- base::Passed(&encoded_frame),
- playout_time));
+ base::Passed(&encoded_frame)));
frame_request_queue_.pop_front();
}
}
diff --git a/media/cast/audio_receiver/audio_receiver.h b/media/cast/audio_receiver/audio_receiver.h
index 5cc8f88..6ce01f6 100644
--- a/media/cast/audio_receiver/audio_receiver.h
+++ b/media/cast/audio_receiver/audio_receiver.h
@@ -76,7 +76,7 @@ class AudioReceiver : public RtpReceiver,
//
// The given |callback| is guaranteed to be run at some point in the future,
// even if to respond with NULL at shutdown time.
- void GetEncodedAudioFrame(const AudioFrameEncodedCallback& callback);
+ void GetEncodedAudioFrame(const FrameEncodedCallback& callback);
// Deliver another packet, possibly a duplicate, and possibly out-of-order.
void IncomingPacket(scoped_ptr<Packet> packet);
@@ -106,12 +106,11 @@ class AudioReceiver : public RtpReceiver,
// EmitAvailableEncodedFrames().
void EmitAvailableEncodedFramesAfterWaiting();
- // Feeds an EncodedAudioFrame into |audio_decoder_|. GetRawAudioFrame() uses
- // this as a callback for GetEncodedAudioFrame().
+ // Feeds an EncodedFrame into |audio_decoder_|. GetRawAudioFrame() uses this
+ // as a callback for GetEncodedAudioFrame().
void DecodeEncodedAudioFrame(
const AudioFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& playout_time);
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
// Return the playout time based on the current time and rtp timestamp.
base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
@@ -162,7 +161,7 @@ class AudioReceiver : public RtpReceiver,
transport::TransportEncryptionHandler decryptor_;
// Outstanding callbacks to run to deliver on client requests for frames.
- std::list<AudioFrameEncodedCallback> frame_request_queue_;
+ std::list<FrameEncodedCallback> frame_request_queue_;
// True while there's an outstanding task to re-invoke
// EmitAvailableEncodedFrames().
diff --git a/media/cast/audio_receiver/audio_receiver_unittest.cc b/media/cast/audio_receiver/audio_receiver_unittest.cc
index f2725ff..bcb7e34 100644
--- a/media/cast/audio_receiver/audio_receiver_unittest.cc
+++ b/media/cast/audio_receiver/audio_receiver_unittest.cc
@@ -37,13 +37,11 @@ class FakeAudioClient {
}
void DeliverEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> audio_frame) {
ASSERT_FALSE(!audio_frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
EXPECT_EQ(expected_frame_id_, audio_frame->frame_id);
- EXPECT_EQ(transport::kPcm16, audio_frame->codec);
- EXPECT_EQ(expected_playout_time_, playout_time);
+ EXPECT_EQ(expected_playout_time_, audio_frame->reference_time);
num_called_++;
}
@@ -92,7 +90,7 @@ class AudioReceiverTest : public ::testing::Test {
rtp_header_.frame_id = kFirstFrameId;
rtp_header_.packet_id = 0;
rtp_header_.max_packet_id = 0;
- rtp_header_.reference_frame_id = 0;
+ rtp_header_.reference_frame_id = rtp_header_.frame_id;
rtp_header_.rtp_timestamp = 0;
}
@@ -154,7 +152,7 @@ TEST_F(AudioReceiverTest, MultiplePendingGetCalls) {
.WillRepeatedly(testing::Return(true));
// Enqueue a request for an audio frame.
- const AudioFrameEncodedCallback frame_encoded_callback =
+ const FrameEncodedCallback frame_encoded_callback =
base::Bind(&FakeAudioClient::DeliverEncodedAudioFrame,
base::Unretained(&fake_audio_client_));
receiver_->GetEncodedAudioFrame(frame_encoded_callback);
diff --git a/media/cast/audio_sender/audio_encoder.cc b/media/cast/audio_sender/audio_encoder.cc
index f5739f7..f81ad26 100644
--- a/media/cast/audio_sender/audio_encoder.cc
+++ b/media/cast/audio_sender/audio_encoder.cc
@@ -60,7 +60,7 @@ void LogAudioFrameEncodedEvent(
// Base class that handles the common problem of feeding one or more AudioBus'
// data into a buffer and then, once the buffer is full, encoding the signal and
-// emitting an EncodedAudioFrame via the FrameEncodedCallback.
+// emitting an EncodedFrame via the FrameEncodedCallback.
//
// Subclasses complete the implementation by handling the actual encoding
// details.
@@ -81,10 +81,11 @@ class AudioEncoder::ImplBase
buffer_fill_end_(0),
frame_id_(0),
frame_rtp_timestamp_(0) {
+ // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
+ const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
sampling_rate % kFramesPerSecond != 0 ||
- samples_per_frame_ * num_channels_ >
- transport::EncodedAudioFrame::kMaxNumberOfSamples) {
+ samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
cast_initialization_status_ = STATUS_INVALID_AUDIO_CONFIGURATION;
}
}
@@ -140,11 +141,13 @@ class AudioEncoder::ImplBase
if (buffer_fill_end_ < samples_per_frame_)
break;
- scoped_ptr<transport::EncodedAudioFrame> audio_frame(
- new transport::EncodedAudioFrame());
- audio_frame->codec = codec_;
+ scoped_ptr<transport::EncodedFrame> audio_frame(
+ new transport::EncodedFrame());
+ audio_frame->dependency = transport::EncodedFrame::KEY;
audio_frame->frame_id = frame_id_;
+ audio_frame->referenced_frame_id = frame_id_;
audio_frame->rtp_timestamp = frame_rtp_timestamp_;
+ audio_frame->reference_time = frame_capture_time_;
if (EncodeFromFilledBuffer(&audio_frame->data)) {
LogAudioFrameEncodedEvent(cast_environment_,
@@ -155,9 +158,7 @@ class AudioEncoder::ImplBase
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
- base::Bind(callback_,
- base::Passed(&audio_frame),
- frame_capture_time_));
+ base::Bind(callback_, base::Passed(&audio_frame)));
}
// Reset the internal buffer, frame ID, and timestamps for the next frame.
@@ -193,7 +194,7 @@ class AudioEncoder::ImplBase
// call.
int buffer_fill_end_;
- // A counter used to label EncodedAudioFrames.
+ // A counter used to label EncodedFrames.
uint32 frame_id_;
// The RTP timestamp for the next frame of encoded audio. This is defined as
diff --git a/media/cast/audio_sender/audio_encoder.h b/media/cast/audio_sender/audio_encoder.h
index d4c9e0f..2297672 100644
--- a/media/cast/audio_sender/audio_encoder.h
+++ b/media/cast/audio_sender/audio_encoder.h
@@ -21,8 +21,8 @@ namespace cast {
class AudioEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedAudioFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
AudioEncoder(const scoped_refptr<CastEnvironment>& cast_environment,
const AudioSenderConfig& audio_config,
diff --git a/media/cast/audio_sender/audio_encoder_unittest.cc b/media/cast/audio_sender/audio_encoder_unittest.cc
index 0ca07bb..f9bc03e 100644
--- a/media/cast/audio_sender/audio_encoder_unittest.cc
+++ b/media/cast/audio_sender/audio_encoder_unittest.cc
@@ -40,11 +40,11 @@ class TestEncodedAudioFrameReceiver {
upper_bound_ = upper_bound;
}
- void FrameEncoded(scoped_ptr<transport::EncodedAudioFrame> encoded_frame,
- const base::TimeTicks& recorded_time) {
- EXPECT_EQ(codec_, encoded_frame->codec);
+ void FrameEncoded(scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ EXPECT_EQ(encoded_frame->dependency, transport::EncodedFrame::KEY);
EXPECT_EQ(static_cast<uint8>(frames_received_ & 0xff),
encoded_frame->frame_id);
+ EXPECT_EQ(encoded_frame->frame_id, encoded_frame->referenced_frame_id);
// RTP timestamps should be monotonically increasing and integer multiples
// of the fixed frame size.
EXPECT_LE(rtp_lower_bound_, encoded_frame->rtp_timestamp);
@@ -54,9 +54,9 @@ class TestEncodedAudioFrameReceiver {
EXPECT_EQ(0u, encoded_frame->rtp_timestamp % kSamplesPerFrame);
EXPECT_TRUE(!encoded_frame->data.empty());
- EXPECT_LE(lower_bound_, recorded_time);
- lower_bound_ = recorded_time;
- EXPECT_GT(upper_bound_, recorded_time);
+ EXPECT_LE(lower_bound_, encoded_frame->reference_time);
+ lower_bound_ = encoded_frame->reference_time;
+ EXPECT_GT(upper_bound_, encoded_frame->reference_time);
++frames_received_;
}
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/audio_sender/audio_sender.cc
index b61a260..4a288d3 100644
--- a/media/cast/audio_sender/audio_sender.cc
+++ b/media/cast/audio_sender/audio_sender.cc
@@ -95,13 +95,13 @@ void AudioSender::InsertAudio(scoped_ptr<AudioBus> audio_bus,
}
void AudioSender::SendEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time) {
+ scoped_ptr<transport::EncodedFrame> audio_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtp_timestamp_helper_.StoreLatestTime(recorded_time,
+ DCHECK(!audio_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(audio_frame->reference_time,
audio_frame->rtp_timestamp);
InitializeTimers();
- transport_sender_->InsertCodedAudioFrame(audio_frame.get(), recorded_time);
+ transport_sender_->InsertCodedAudioFrame(*audio_frame);
}
void AudioSender::ResendPackets(
diff --git a/media/cast/audio_sender/audio_sender.h b/media/cast/audio_sender/audio_sender.h
index 051c886..6ff464f 100644
--- a/media/cast/audio_sender/audio_sender.h
+++ b/media/cast/audio_sender/audio_sender.h
@@ -46,9 +46,7 @@ class AudioSender : public base::NonThreadSafe,
void IncomingRtcpPacket(scoped_ptr<Packet> packet);
protected:
- void SendEncodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendEncodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame);
private:
friend class LocalRtcpAudioSenderFeedback;
diff --git a/media/cast/audio_sender/audio_sender_unittest.cc b/media/cast/audio_sender/audio_sender_unittest.cc
index 047d2e4..f7e5c0a 100644
--- a/media/cast/audio_sender/audio_sender_unittest.cc
+++ b/media/cast/audio_sender/audio_sender_unittest.cc
@@ -108,8 +108,7 @@ TEST_F(AudioSenderTest, Encode20ms) {
TestAudioBusFactory::kMiddleANoteFreq,
0.5f).NextAudioBus(kDuration));
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(bus.Pass(), recorded_time);
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
task_runner_->RunTasks();
EXPECT_GE(
transport_.number_of_rtp_packets() + transport_.number_of_rtcp_packets(),
@@ -124,8 +123,7 @@ TEST_F(AudioSenderTest, RtcpTimer) {
TestAudioBusFactory::kMiddleANoteFreq,
0.5f).NextAudioBus(kDuration));
- base::TimeTicks recorded_time = base::TimeTicks::Now();
- audio_sender_->InsertAudio(bus.Pass(), recorded_time);
+ audio_sender_->InsertAudio(bus.Pass(), testing_clock_->NowTicks());
task_runner_->RunTasks();
// Make sure that we send at least one RTCP packet.
diff --git a/media/cast/cast_receiver.h b/media/cast/cast_receiver.h
index fa6adac..22e7bda 100644
--- a/media/cast/cast_receiver.h
+++ b/media/cast/cast_receiver.h
@@ -40,28 +40,23 @@ typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& playout_time,
bool is_continuous)> VideoFrameDecodedCallback;
-// The following callbacks deliver still-encoded audio/video frame data, along
-// with the frame's corresponding play-out time. The client should examine the
-// EncodedXXXFrame::frame_id field to determine whether any frames have been
+// The following callback delivers encoded frame data and metadata. The client
+// should examine the |frame_id| field to determine whether any frames have been
// dropped (i.e., frame_id should be incrementing by one each time). Note: A
// NULL pointer can be returned on error.
-typedef base::Callback<void(scoped_ptr<transport::EncodedAudioFrame>,
- const base::TimeTicks&)> AudioFrameEncodedCallback;
-typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> VideoFrameEncodedCallback;
+typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
// This Class is thread safe.
class FrameReceiver : public base::RefCountedThreadSafe<FrameReceiver> {
public:
virtual void GetRawAudioFrame(const AudioFrameDecodedCallback& callback) = 0;
- virtual void GetCodedAudioFrame(
- const AudioFrameEncodedCallback& callback) = 0;
+ virtual void GetCodedAudioFrame(const FrameEncodedCallback& callback) = 0;
virtual void GetRawVideoFrame(const VideoFrameDecodedCallback& callback) = 0;
- virtual void GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) = 0;
+ virtual void GetEncodedVideoFrame(const FrameEncodedCallback& callback) = 0;
protected:
virtual ~FrameReceiver() {}
diff --git a/media/cast/cast_receiver_impl.cc b/media/cast/cast_receiver_impl.cc
index b38cd99..f502a0f 100644
--- a/media/cast/cast_receiver_impl.cc
+++ b/media/cast/cast_receiver_impl.cc
@@ -33,7 +33,7 @@ class LocalFrameReceiver : public FrameReceiver {
callback));
}
- virtual void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback)
+ virtual void GetEncodedVideoFrame(const FrameEncodedCallback& callback)
OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
@@ -51,7 +51,7 @@ class LocalFrameReceiver : public FrameReceiver {
callback));
}
- virtual void GetCodedAudioFrame(const AudioFrameEncodedCallback& callback)
+ virtual void GetCodedAudioFrame(const FrameEncodedCallback& callback)
OVERRIDE {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
diff --git a/media/cast/framer/frame_buffer.cc b/media/cast/framer/frame_buffer.cc
index 2bfdeb5..0b6fa83 100644
--- a/media/cast/framer/frame_buffer.cc
+++ b/media/cast/framer/frame_buffer.cc
@@ -28,6 +28,8 @@ void FrameBuffer::InsertPacket(const uint8* payload_data,
frame_id_ = rtp_header.frame_id;
max_packet_id_ = rtp_header.max_packet_id;
is_key_frame_ = rtp_header.is_key_frame;
+ if (is_key_frame_)
+ DCHECK_EQ(rtp_header.frame_id, rtp_header.reference_frame_id);
last_referenced_frame_id_ = rtp_header.reference_frame_id;
rtp_timestamp_ = rtp_header.rtp_timestamp;
}
@@ -57,44 +59,27 @@ bool FrameBuffer::Complete() const {
return num_packets_received_ - 1 == max_packet_id_;
}
-bool FrameBuffer::GetEncodedAudioFrame(
- transport::EncodedAudioFrame* audio_frame) const {
+bool FrameBuffer::AssembleEncodedFrame(transport::EncodedFrame* frame) const {
if (!Complete())
return false;
// Frame is complete -> construct.
- audio_frame->frame_id = frame_id_;
- audio_frame->rtp_timestamp = rtp_timestamp_;
+ if (is_key_frame_)
+ frame->dependency = transport::EncodedFrame::KEY;
+ else if (frame_id_ == last_referenced_frame_id_)
+ frame->dependency = transport::EncodedFrame::INDEPENDENT;
+ else
+ frame->dependency = transport::EncodedFrame::DEPENDENT;
+ frame->frame_id = frame_id_;
+ frame->referenced_frame_id = last_referenced_frame_id_;
+ frame->rtp_timestamp = rtp_timestamp_;
// Build the data vector.
- audio_frame->data.clear();
- audio_frame->data.reserve(total_data_size_);
+ frame->data.clear();
+ frame->data.reserve(total_data_size_);
PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- audio_frame->data.insert(
- audio_frame->data.end(), it->second.begin(), it->second.end());
- }
- return true;
-}
-
-bool FrameBuffer::GetEncodedVideoFrame(
- transport::EncodedVideoFrame* video_frame) const {
- if (!Complete())
- return false;
- // Frame is complete -> construct.
- video_frame->key_frame = is_key_frame_;
- video_frame->frame_id = frame_id_;
- video_frame->last_referenced_frame_id = last_referenced_frame_id_;
- video_frame->rtp_timestamp = rtp_timestamp_;
-
- // Build the data vector.
- video_frame->data.clear();
- video_frame->data.reserve(total_data_size_);
- PacketMap::const_iterator it;
- for (it = packets_.begin(); it != packets_.end(); ++it) {
- video_frame->data.insert(
- video_frame->data.end(), it->second.begin(), it->second.end());
- }
+ for (it = packets_.begin(); it != packets_.end(); ++it)
+ frame->data.insert(frame->data.end(), it->second.begin(), it->second.end());
return true;
}
diff --git a/media/cast/framer/frame_buffer.h b/media/cast/framer/frame_buffer.h
index 65df021d..d4d5ded 100644
--- a/media/cast/framer/frame_buffer.h
+++ b/media/cast/framer/frame_buffer.h
@@ -25,9 +25,11 @@ class FrameBuffer {
const RtpCastHeader& rtp_header);
bool Complete() const;
- bool GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame) const;
-
- bool GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame) const;
+ // If a frame is complete, sets the frame IDs and RTP timestamp in |frame|,
+ // and also copies the data from all packets into the data field in |frame|.
+ // Returns true if the frame was complete; false if incomplete and |frame|
+ // remains unchanged.
+ bool AssembleEncodedFrame(transport::EncodedFrame* frame) const;
bool is_key_frame() const { return is_key_frame_; }
diff --git a/media/cast/framer/frame_buffer_unittest.cc b/media/cast/framer/frame_buffer_unittest.cc
index c00aa2b..d6844f3e 100644
--- a/media/cast/framer/frame_buffer_unittest.cc
+++ b/media/cast/framer/frame_buffer_unittest.cc
@@ -29,26 +29,26 @@ TEST_F(FrameBufferTest, OnePacketInsertSanity) {
rtp_header_.frame_id = 5;
rtp_header_.reference_frame_id = 5;
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(5u, frame.referenced_frame_id);
EXPECT_EQ(3000u, frame.rtp_timestamp);
}
TEST_F(FrameBufferTest, EmptyBuffer) {
EXPECT_FALSE(buffer_.Complete());
- EXPECT_FALSE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_FALSE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_FALSE(buffer_.AssembleEncodedFrame(&frame));
}
TEST_F(FrameBufferTest, DefaultOnePacketFrame) {
buffer_.InsertPacket(payload_.data(), payload_.size(), rtp_header_);
EXPECT_TRUE(buffer_.Complete());
EXPECT_FALSE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(payload_.size(), frame.data.size());
}
@@ -63,8 +63,8 @@ TEST_F(FrameBufferTest, MultiplePacketFrame) {
++rtp_header_.packet_id;
EXPECT_TRUE(buffer_.Complete());
EXPECT_TRUE(buffer_.is_key_frame());
- transport::EncodedVideoFrame frame;
- EXPECT_TRUE(buffer_.GetEncodedVideoFrame(&frame));
+ transport::EncodedFrame frame;
+ EXPECT_TRUE(buffer_.AssembleEncodedFrame(&frame));
EXPECT_EQ(3 * payload_.size(), frame.data.size());
}
diff --git a/media/cast/framer/framer.cc b/media/cast/framer/framer.cc
index d510d8b..ee5e6aa 100644
--- a/media/cast/framer/framer.cc
+++ b/media/cast/framer/framer.cc
@@ -69,7 +69,7 @@ bool Framer::InsertPacket(const uint8* payload_data,
}
// This does not release the frame.
-bool Framer::GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame,
+bool Framer::GetEncodedAudioFrame(transport::EncodedFrame* audio_frame,
bool* next_frame) {
uint32 frame_id;
// Find frame id.
@@ -88,11 +88,11 @@ bool Framer::GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame,
if (it == frames_.end())
return false;
- return it->second->GetEncodedAudioFrame(audio_frame);
+ return it->second->AssembleEncodedFrame(audio_frame);
}
// This does not release the frame.
-bool Framer::GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
+bool Framer::GetEncodedVideoFrame(transport::EncodedFrame* video_frame,
bool* next_frame) {
uint32 frame_id;
// Find frame id.
@@ -115,7 +115,7 @@ bool Framer::GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
if (it == frames_.end())
return false;
- return it->second->GetEncodedVideoFrame(video_frame);
+ return it->second->AssembleEncodedFrame(video_frame);
}
void Framer::Reset() {
diff --git a/media/cast/framer/framer.h b/media/cast/framer/framer.h
index eb67064..0c7397b 100644
--- a/media/cast/framer/framer.h
+++ b/media/cast/framer/framer.h
@@ -44,10 +44,10 @@ class Framer {
// frame.
// Returns false if the frame does not exist or if the frame is not complete
// within the given time frame.
- bool GetEncodedVideoFrame(transport::EncodedVideoFrame* video_frame,
+ bool GetEncodedVideoFrame(transport::EncodedFrame* video_frame,
bool* next_frame);
- bool GetEncodedAudioFrame(transport::EncodedAudioFrame* audio_frame,
+ bool GetEncodedAudioFrame(transport::EncodedFrame* audio_frame,
bool* next_frame);
void ReleaseFrame(uint32 frame_id);
diff --git a/media/cast/framer/framer_unittest.cc b/media/cast/framer/framer_unittest.cc
index 06a340e..5cb12ce 100644
--- a/media/cast/framer/framer_unittest.cc
+++ b/media/cast/framer/framer_unittest.cc
@@ -33,13 +33,13 @@ class FramerTest : public ::testing::Test {
};
TEST_F(FramerTest, EmptyState) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
EXPECT_FALSE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
}
TEST_F(FramerTest, AlwaysStartWithKey) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool duplicate = false;
@@ -57,13 +57,14 @@ TEST_F(FramerTest, AlwaysStartWithKey) {
EXPECT_TRUE(complete);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(1u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, CompleteFrame) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool duplicate = false;
@@ -75,8 +76,9 @@ TEST_F(FramerTest, CompleteFrame) {
EXPECT_TRUE(complete);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Incomplete delta.
@@ -100,7 +102,7 @@ TEST_F(FramerTest, CompleteFrame) {
}
TEST_F(FramerTest, DuplicatePackets) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool duplicate = false;
@@ -131,7 +133,9 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
// Add same packet again in complete key frame.
duplicate = false;
@@ -140,7 +144,9 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Incomplete delta frame.
@@ -171,7 +177,9 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_TRUE(complete);
EXPECT_FALSE(duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
// Add same packet again in complete delta frame.
duplicate = false;
@@ -180,11 +188,13 @@ TEST_F(FramerTest, DuplicatePackets) {
EXPECT_FALSE(complete);
EXPECT_TRUE(duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
}
TEST_F(FramerTest, ContinuousSequence) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool duplicate = false;
@@ -196,8 +206,9 @@ TEST_F(FramerTest, ContinuousSequence) {
EXPECT_TRUE(complete);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
- EXPECT_TRUE(frame.key_frame);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Complete - not continuous.
@@ -212,7 +223,7 @@ TEST_F(FramerTest, ContinuousSequence) {
TEST_F(FramerTest, Wrap) {
// Insert key frame, frame_id = 255 (will jump to that)
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -224,7 +235,9 @@ TEST_F(FramerTest, Wrap) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert wrapped delta frame - should be continuous.
@@ -234,12 +247,14 @@ TEST_F(FramerTest, Wrap) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, Reset) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool complete = false;
bool duplicate = false;
@@ -254,7 +269,7 @@ TEST_F(FramerTest, Reset) {
}
TEST_F(FramerTest, RequireKeyAfterReset) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -276,7 +291,7 @@ TEST_F(FramerTest, RequireKeyAfterReset) {
}
TEST_F(FramerTest, BasicNonLastReferenceId) {
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -300,7 +315,7 @@ TEST_F(FramerTest, BasicNonLastReferenceId) {
TEST_F(FramerTest, InOrderReferenceFrameSelection) {
// Create pattern: 0, 1, 4, 5.
- transport::EncodedVideoFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -324,15 +339,21 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
framer_.InsertPacket(
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(0u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(1u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_FALSE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(4u, frame.frame_id);
+ EXPECT_EQ(0u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert remaining packet of frame #2 - should not be continuous.
rtp_header_.frame_id = 2;
@@ -348,12 +369,14 @@ TEST_F(FramerTest, InOrderReferenceFrameSelection) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedVideoFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT, frame.dependency);
EXPECT_EQ(5u, frame.frame_id);
+ EXPECT_EQ(4u, frame.referenced_frame_id);
}
TEST_F(FramerTest, AudioWrap) {
// All audio frames are marked as key frames.
- transport::EncodedAudioFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -365,7 +388,9 @@ TEST_F(FramerTest, AudioWrap) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(254u, frame.frame_id);
+ EXPECT_EQ(254u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
rtp_header_.frame_id = 255;
@@ -381,18 +406,22 @@ TEST_F(FramerTest, AudioWrap) {
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// All audio frames are marked as key frames.
- transport::EncodedAudioFrame frame;
+ transport::EncodedFrame frame;
bool next_frame = false;
bool duplicate = false;
@@ -404,7 +433,9 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
payload_.data(), payload_.size(), rtp_header_, &duplicate);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(253u, frame.frame_id);
+ EXPECT_EQ(253u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
// Insert third and fourth packets.
@@ -420,11 +451,15 @@ TEST_F(FramerTest, AudioWrapWithMissingFrame) {
// Get third and fourth packets.
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_FALSE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(255u, frame.frame_id);
+ EXPECT_EQ(255u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
EXPECT_TRUE(framer_.GetEncodedAudioFrame(&frame, &next_frame));
EXPECT_TRUE(next_frame);
+ EXPECT_EQ(transport::EncodedFrame::KEY, frame.dependency);
EXPECT_EQ(256u, frame.frame_id);
+ EXPECT_EQ(256u, frame.referenced_frame_id);
framer_.ReleaseFrame(frame.frame_id);
}
diff --git a/media/cast/test/encode_decode_test.cc b/media/cast/test/encode_decode_test.cc
deleted file mode 100644
index 67edbc8..0000000
--- a/media/cast/test/encode_decode_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Joint encoder and decoder testing.
-// These tests operate directly on the VP8 encoder and decoder, not the
-// transport layer, and are targeted at validating the bit stream.
-
-#include <gtest/gtest.h>
-#include <stdint.h>
-
-#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/test/fake_single_thread_task_runner.h"
-#include "media/cast/test/utility/video_utility.h"
-#include "media/cast/video_receiver/codecs/vp8/vp8_decoder.h"
-#include "media/cast/video_sender/codecs/vp8/vp8_encoder.h"
-
-namespace media {
-namespace cast {
-
-static const int64 kStartMillisecond = INT64_C(1245);
-static const int kWidth = 1280;
-static const int kHeight = 720;
-static const int kStartbitrate = 4000000;
-static const int kMaxQp = 54;
-static const int kMinQp = 4;
-static const int kMaxFrameRate = 30;
-
-namespace {
-class EncodeDecodeTestFrameCallback
- : public base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback> {
- public:
- EncodeDecodeTestFrameCallback() : num_called_(0) {
- gfx::Size size(kWidth, kHeight);
- original_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- }
-
- void SetFrameStartValue(int start_value) {
- PopulateVideoFrame(original_frame_.get(), start_value);
- }
-
- void DecodeComplete(const scoped_refptr<media::VideoFrame>& decoded_frame,
- const base::TimeTicks& render_time) {
- ++num_called_;
- // Compare resolution.
- EXPECT_EQ(original_frame_->coded_size().width(),
- decoded_frame->coded_size().width());
- EXPECT_EQ(original_frame_->coded_size().height(),
- decoded_frame->coded_size().height());
- // Compare data.
- EXPECT_GT(I420PSNR(original_frame_, decoded_frame), 40.0);
- }
-
- int num_called() const { return num_called_; }
-
- protected:
- virtual ~EncodeDecodeTestFrameCallback() {}
-
- private:
- friend class base::RefCountedThreadSafe<EncodeDecodeTestFrameCallback>;
-
- int num_called_;
- scoped_refptr<media::VideoFrame> original_frame_;
-};
-} // namespace
-
-class EncodeDecodeTest : public ::testing::Test {
- protected:
- EncodeDecodeTest()
- : testing_clock_(new base::SimpleTestTickClock()),
- task_runner_(new test::FakeSingleThreadTaskRunner(testing_clock_)),
- // CastEnvironment will only be used by the vp8 decoder; Enable only the
- // video and main threads.
- cast_environment_(new CastEnvironment(
- scoped_ptr<base::TickClock>(testing_clock_).Pass(),
- task_runner_,
- NULL,
- task_runner_)),
- test_callback_(new EncodeDecodeTestFrameCallback()) {
- testing_clock_->Advance(
- base::TimeDelta::FromMilliseconds(kStartMillisecond));
- encoder_config_.max_number_of_video_buffers_used = 1;
- encoder_config_.width = kWidth;
- encoder_config_.height = kHeight;
- encoder_config_.start_bitrate = kStartbitrate;
- encoder_config_.min_qp = kMaxQp;
- encoder_config_.min_qp = kMinQp;
- encoder_config_.max_frame_rate = kMaxFrameRate;
- int max_unacked_frames = 1;
- encoder_.reset(new Vp8Encoder(encoder_config_, max_unacked_frames));
- // Initialize to use one core.
- decoder_.reset(new Vp8Decoder(cast_environment_));
- }
-
- virtual ~EncodeDecodeTest() {}
-
- virtual void SetUp() OVERRIDE {
- // Create test frame.
- int start_value = 10; // Random value to start from.
- gfx::Size size(encoder_config_.width, encoder_config_.height);
- video_frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
- PopulateVideoFrame(video_frame_, start_value);
- test_callback_->SetFrameStartValue(start_value);
- }
-
- VideoSenderConfig encoder_config_;
- scoped_ptr<Vp8Encoder> encoder_;
- scoped_ptr<Vp8Decoder> decoder_;
- scoped_refptr<media::VideoFrame> video_frame_;
- base::SimpleTestTickClock* testing_clock_; // Owned by CastEnvironment.
- scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- scoped_refptr<CastEnvironment> cast_environment_;
- scoped_refptr<EncodeDecodeTestFrameCallback> test_callback_;
-};
-
-TEST_F(EncodeDecodeTest, BasicEncodeDecode) {
- transport::EncodedVideoFrame encoded_frame;
- encoder_->Initialize();
- // Encode frame.
- encoder_->Encode(video_frame_, &encoded_frame);
- EXPECT_GT(encoded_frame.data.size(), UINT64_C(0));
- // Decode frame.
- decoder_->Decode(&encoded_frame,
- base::TimeTicks(),
- base::Bind(&EncodeDecodeTestFrameCallback::DecodeComplete,
- test_callback_));
- task_runner_->RunTasks();
-}
-
-} // namespace cast
-} // namespace media
diff --git a/media/cast/test/end2end_unittest.cc b/media/cast/test/end2end_unittest.cc
index e668ed6..f1567a5 100644
--- a/media/cast/test/end2end_unittest.cc
+++ b/media/cast/test/end2end_unittest.cc
@@ -18,7 +18,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_byteorder.h"
#include "base/test/simple_test_tick_clock.h"
@@ -306,9 +305,7 @@ class TestReceiverAudioCallback
EXPECT_TRUE(is_continuous);
}
- void CheckCodedAudioFrame(
- scoped_ptr<transport::EncodedAudioFrame> audio_frame,
- const base::TimeTicks& playout_time) {
+ void CheckCodedAudioFrame(scoped_ptr<transport::EncodedFrame> audio_frame) {
ASSERT_TRUE(!!audio_frame);
ASSERT_FALSE(expected_frames_.empty());
const ExpectedAudioFrame& expected_audio_frame =
@@ -323,7 +320,7 @@ class TestReceiverAudioCallback
expected_audio_frame.audio_bus->frames(),
num_elements);
int16* const pcm_data =
- reinterpret_cast<int16*>(string_as_array(&audio_frame->data));
+ reinterpret_cast<int16*>(audio_frame->mutable_bytes());
for (int i = 0; i < num_elements; ++i)
pcm_data[i] = static_cast<int16>(base::NetToHost16(pcm_data[i]));
scoped_ptr<AudioBus> audio_bus(
@@ -332,7 +329,7 @@ class TestReceiverAudioCallback
audio_bus->FromInterleaved(pcm_data, audio_bus->frames(), sizeof(int16));
// Delegate the checking from here...
- CheckAudioFrame(audio_bus.Pass(), playout_time, true);
+ CheckAudioFrame(audio_bus.Pass(), audio_frame->reference_time, true);
}
int number_times_called() const { return num_called_; }
diff --git a/media/cast/transport/cast_transport_config.cc b/media/cast/transport/cast_transport_config.cc
index 43b1f5d..2c40a19 100644
--- a/media/cast/transport/cast_transport_config.cc
+++ b/media/cast/transport/cast_transport_config.cc
@@ -33,17 +33,13 @@ CastTransportVideoConfig::CastTransportVideoConfig() : codec(kVp8) {}
CastTransportVideoConfig::~CastTransportVideoConfig() {}
-EncodedVideoFrame::EncodedVideoFrame()
- : codec(kVp8),
- key_frame(false),
+EncodedFrame::EncodedFrame()
+ : dependency(UNKNOWN_DEPENDENCY),
frame_id(0),
- last_referenced_frame_id(0),
+ referenced_frame_id(0),
rtp_timestamp(0) {}
-EncodedVideoFrame::~EncodedVideoFrame() {}
-EncodedAudioFrame::EncodedAudioFrame()
- : codec(kOpus), frame_id(0), rtp_timestamp(0) {}
-EncodedAudioFrame::~EncodedAudioFrame() {}
+EncodedFrame::~EncodedFrame() {}
RtcpSenderInfo::RtcpSenderInfo()
: ntp_seconds(0),
diff --git a/media/cast/transport/cast_transport_config.h b/media/cast/transport/cast_transport_config.h
index d7d3196..b25ce4e 100644
--- a/media/cast/transport/cast_transport_config.h
+++ b/media/cast/transport/cast_transport_config.h
@@ -12,6 +12,7 @@
#include "base/callback.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
+#include "base/stl_util.h"
#include "media/cast/transport/cast_transport_defines.h"
#include "net/base/ip_endpoint.h"
@@ -69,27 +70,68 @@ struct CastTransportVideoConfig {
VideoCodec codec;
};
-struct EncodedVideoFrame {
- EncodedVideoFrame();
- ~EncodedVideoFrame();
+// A combination of metadata and data for one encoded frame. This can contain
+// audio data or video data or other.
+struct EncodedFrame {
+ enum Dependency {
+ // "null" value, used to indicate whether |dependency| has been set.
+ UNKNOWN_DEPENDENCY,
- VideoCodec codec;
- bool key_frame;
+ // Not decodable without the reference frame indicated by
+ // |referenced_frame_id|.
+ DEPENDENT,
+
+ // Independently decodable.
+ INDEPENDENT,
+
+ // Independently decodable, and no future frames will depend on any frames
+ // before this one.
+ KEY,
+
+ DEPENDENCY_LAST = KEY
+ };
+
+ EncodedFrame();
+ ~EncodedFrame();
+
+ // Convenience accessors to data as an array of uint8 elements.
+ const uint8* bytes() const {
+ return reinterpret_cast<uint8*>(string_as_array(
+ const_cast<std::string*>(&data)));
+ }
+ uint8* mutable_bytes() {
+ return reinterpret_cast<uint8*>(string_as_array(&data));
+ }
+
+ // This frame's dependency relationship with respect to other frames.
+ Dependency dependency;
+
+ // The label associated with this frame. Implies an ordering relative to
+ // other frames in the same stream.
uint32 frame_id;
- uint32 last_referenced_frame_id;
- uint32 rtp_timestamp;
- std::string data;
-};
-struct EncodedAudioFrame {
- EncodedAudioFrame();
- ~EncodedAudioFrame();
+ // The label associated with the frame upon which this frame depends. If
+ // this frame does not require any other frame in order to become decodable
+ // (e.g., key frames), |referenced_frame_id| must equal |frame_id|.
+ uint32 referenced_frame_id;
- AudioCodec codec;
- uint32 frame_id; // Needed to release the frame.
+ // The stream timestamp, on the timeline of the signal data. For example, RTP
+ // timestamps for audio are usually defined as the total number of audio
+ // samples encoded in all prior frames. A playback system uses this value to
+ // detect gaps in the stream, and otherwise stretch the signal to match
+ // playout targets.
uint32 rtp_timestamp;
- // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
- static const int kMaxNumberOfSamples = 48 * 2 * 100;
+
+ // The common reference clock timestamp for this frame. This value originates
+ // from a sender and is used to provide lip synchronization between streams in
+ // a receiver. Thus, in the sender context, this is set to the time at which
+ // the frame was captured/recorded. In the receiver context, this is set to
+ // the target playout time. Over a sequence of frames, this time value is
+ // expected to drift with respect to the elapsed time implied by the RTP
+ // timestamps; and it may not necessarily increment with precise regularity.
+ base::TimeTicks reference_time;
+
+ // The encoded signal data.
std::string data;
};
diff --git a/media/cast/transport/cast_transport_sender.h b/media/cast/transport/cast_transport_sender.h
index 0714995..a69b741 100644
--- a/media/cast/transport/cast_transport_sender.h
+++ b/media/cast/transport/cast_transport_sender.h
@@ -77,11 +77,8 @@ class CastTransportSender : public base::NonThreadSafe {
// The following two functions handle the encoded media frames (audio and
// video) to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) = 0;
-
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) = 0;
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) = 0;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) = 0;
// Builds an RTCP packet and sends it to the network.
// |ntp_seconds|, |ntp_fraction| and |rtp_timestamp| are used in the
diff --git a/media/cast/transport/cast_transport_sender_impl.cc b/media/cast/transport/cast_transport_sender_impl.cc
index 8d2a871..8f07832 100644
--- a/media/cast/transport/cast_transport_sender_impl.cc
+++ b/media/cast/transport/cast_transport_sender_impl.cc
@@ -100,17 +100,15 @@ void CastTransportSenderImpl::SetPacketReceiver(
}
void CastTransportSenderImpl::InsertCodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
+ const EncodedFrame& audio_frame) {
DCHECK(audio_sender_) << "Audio sender uninitialized";
- audio_sender_->InsertCodedAudioFrame(audio_frame, recorded_time);
+ audio_sender_->SendFrame(audio_frame);
}
void CastTransportSenderImpl::InsertCodedVideoFrame(
- const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
+ const EncodedFrame& video_frame) {
DCHECK(video_sender_) << "Video sender uninitialized";
- video_sender_->InsertCodedVideoFrame(video_frame, capture_time);
+ video_sender_->SendFrame(video_frame);
}
void CastTransportSenderImpl::SendRtcpFromRtpSender(
diff --git a/media/cast/transport/cast_transport_sender_impl.h b/media/cast/transport/cast_transport_sender_impl.h
index ca84d12..91b03c5 100644
--- a/media/cast/transport/cast_transport_sender_impl.h
+++ b/media/cast/transport/cast_transport_sender_impl.h
@@ -54,13 +54,8 @@ class CastTransportSenderImpl : public CastTransportSender {
virtual void SetPacketReceiver(const PacketReceiverCallback& packet_receiver)
OVERRIDE;
- virtual void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time)
- OVERRIDE;
-
- virtual void InsertCodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time)
- OVERRIDE;
+ virtual void InsertCodedAudioFrame(const EncodedFrame& audio_frame) OVERRIDE;
+ virtual void InsertCodedVideoFrame(const EncodedFrame& video_frame) OVERRIDE;
virtual void SendRtcpFromRtpSender(uint32 packet_type_flags,
uint32 ntp_seconds,
diff --git a/media/cast/transport/rtp_sender/mock_rtp_sender.h b/media/cast/transport/rtp_sender/mock_rtp_sender.h
deleted file mode 100644
index 6bc54d5..0000000
--- a/media/cast/transport/rtp_sender/mock_rtp_sender.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
-#define MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
-
-#include <vector>
-
-#include "media/cast/transport/rtp_sender/rtp_sender.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-namespace cast {
-namespace transport {
-
-class MockRtpSender : public RtpSender {
- public:
- MOCK_METHOD2(IncomingEncodedVideoFrame,
- bool(const EncodedVideoFrame& frame, int64 capture_time));
-
- MOCK_METHOD2(IncomingEncodedAudioFrame,
- bool(const EncodedAudioFrame& frame, int64 recorded_time));
-
- MOCK_METHOD3(ResendPacket,
- bool(bool is_audio, uint32 frame_id, uint16 packet_id));
-
- MOCK_METHOD0(RtpStatistics, void());
-};
-
-} // namespace transport
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_TRANSPORT_RTP_SENDER_MOCK_RTP_SENDER_H_
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
index f4c292f..50ec42b 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.cc
@@ -45,61 +45,25 @@ RtpPacketizer::RtpPacketizer(PacedSender* const transport,
RtpPacketizer::~RtpPacketizer() {}
-void RtpPacketizer::IncomingEncodedVideoFrame(
- const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
- DCHECK(!config_.audio) << "Invalid state";
- if (config_.audio)
- return;
-
- Cast(video_frame->key_frame,
- video_frame->frame_id,
- video_frame->last_referenced_frame_id,
- video_frame->rtp_timestamp,
- video_frame->data,
- capture_time);
-}
-
-void RtpPacketizer::IncomingEncodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
- DCHECK(config_.audio) << "Invalid state";
- if (!config_.audio)
- return;
-
- Cast(true,
- audio_frame->frame_id,
- 0,
- audio_frame->rtp_timestamp,
- audio_frame->data,
- recorded_time);
-}
-
uint16 RtpPacketizer::NextSequenceNumber() {
++sequence_number_;
return sequence_number_ - 1;
}
-// TODO(mikhal): Switch to pass data with a const_ref.
-void RtpPacketizer::Cast(bool is_key,
- uint32 frame_id,
- uint32 reference_frame_id,
- uint32 timestamp,
- const std::string& data,
- const base::TimeTicks& capture_time) {
+void RtpPacketizer::SendFrameAsPackets(const EncodedFrame& frame) {
uint16 rtp_header_length = kCommonRtpHeaderLength + kCastRtpHeaderLength;
uint16 max_length = config_.max_payload_length - rtp_header_length - 1;
- rtp_timestamp_ = timestamp;
+ rtp_timestamp_ = frame.rtp_timestamp;
// Split the payload evenly (round number up).
- size_t num_packets = (data.size() + max_length) / max_length;
- size_t payload_length = (data.size() + num_packets) / num_packets;
+ size_t num_packets = (frame.data.size() + max_length) / max_length;
+ size_t payload_length = (frame.data.size() + num_packets) / num_packets;
DCHECK_LE(payload_length, max_length) << "Invalid argument";
SendPacketVector packets;
- size_t remaining_size = data.size();
- std::string::const_iterator data_iter = data.begin();
+ size_t remaining_size = frame.data.size();
+ std::string::const_iterator data_iter = frame.data.begin();
while (remaining_size > 0) {
PacketRef packet(new base::RefCountedData<Packet>);
@@ -107,31 +71,35 @@ void RtpPacketizer::Cast(bool is_key,
payload_length = remaining_size;
}
remaining_size -= payload_length;
- BuildCommonRTPheader(&packet->data, remaining_size == 0, timestamp);
+ BuildCommonRTPheader(
+ &packet->data, remaining_size == 0, frame.rtp_timestamp);
// Build Cast header.
- packet->data.push_back((is_key ? kCastKeyFrameBitMask : 0) |
- kCastReferenceFrameIdBitMask);
- packet->data.push_back(frame_id);
+ // TODO(miu): Should we always set the ref frame bit and the ref_frame_id?
+ DCHECK_NE(frame.dependency, EncodedFrame::UNKNOWN_DEPENDENCY);
+ packet->data.push_back(
+ ((frame.dependency == EncodedFrame::KEY) ? kCastKeyFrameBitMask : 0) |
+ kCastReferenceFrameIdBitMask);
+ packet->data.push_back(static_cast<uint8>(frame.frame_id));
size_t start_size = packet->data.size();
packet->data.resize(start_size + 4);
base::BigEndianWriter big_endian_writer(
reinterpret_cast<char*>(&(packet->data[start_size])), 4);
big_endian_writer.WriteU16(packet_id_);
big_endian_writer.WriteU16(static_cast<uint16>(num_packets - 1));
- packet->data.push_back(static_cast<uint8>(reference_frame_id));
+ packet->data.push_back(static_cast<uint8>(frame.referenced_frame_id));
// Copy payload data.
packet->data.insert(packet->data.end(),
data_iter,
data_iter + payload_length);
- PacketKey key = PacedPacketSender::MakePacketKey(capture_time,
+ PacketKey key = PacedPacketSender::MakePacketKey(frame.reference_time,
config_.ssrc,
packet_id_);
// Store packet.
- packet_storage_->StorePacket(frame_id, packet_id_, key, packet);
+ packet_storage_->StorePacket(frame.frame_id, packet_id_, key, packet);
++packet_id_;
data_iter += payload_length;
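
SendFrameAsPackets() keeps the existing even-split arithmetic, now reading the payload size and RTP timestamp off the frame itself. A standalone sketch of that split with a hypothetical max_length of 1450 bytes (the real value is config_.max_payload_length minus the RTP and Cast header lengths):

  #include <cstddef>
  #include <cstdio>
  #include <string>
  #include <vector>

  int main() {
    const std::string data(4000, 'x');  // pretend encoded frame payload
    const size_t max_length = 1450;     // assumed value for illustration

    // Round the packet count up, then spread the payload evenly.
    const size_t num_packets = (data.size() + max_length) / max_length;       // 3
    const size_t payload_length = (data.size() + num_packets) / num_packets;  // 1334

    size_t remaining = data.size();
    std::vector<size_t> chunk_sizes;
    while (remaining > 0) {
      const size_t chunk = remaining < payload_length ? remaining : payload_length;
      chunk_sizes.push_back(chunk);
      remaining -= chunk;
    }
    // Prints: 3 packets of sizes 1334 1334 1332
    std::printf("%zu packets of sizes", chunk_sizes.size());
    for (size_t i = 0; i < chunk_sizes.size(); ++i)
      std::printf(" %zu", chunk_sizes[i]);
    std::printf("\n");
    return 0;
  }
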
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
index ff5ad84..ebdbf01 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer.h
@@ -55,15 +55,7 @@ class RtpPacketizer {
RtpPacketizerConfig rtp_packetizer_config);
~RtpPacketizer();
- // The video_frame objects ownership is handled by the main cast thread.
- // |capture_time| is only used for scheduling of outgoing packets.
- void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
-
- // The audio_frame objects ownership is handled by the main cast thread.
- // |recorded_time| is only used for scheduling of outgoing packets.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendFrameAsPackets(const EncodedFrame& frame);
// Return the next sequence number, and increment by one. Enables unique
// incremental sequence numbers for every packet (including retransmissions).
@@ -73,13 +65,6 @@ class RtpPacketizer {
size_t send_octet_count() const { return send_octet_count_; }
private:
- void Cast(bool is_key,
- uint32 frame_id,
- uint32 reference_frame_id,
- uint32 timestamp,
- const std::string& data,
- const base::TimeTicks& capture_time);
-
void BuildCommonRTPheader(Packet* packet, bool marker_bit, uint32 time_stamp);
RtpPacketizerConfig config_;
diff --git a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
index 0545461..64def4c 100644
--- a/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
+++ b/media/cast/transport/rtp_sender/rtp_packetizer/rtp_packetizer_unittest.cc
@@ -112,9 +112,9 @@ class RtpPacketizerTest : public ::testing::Test {
pacer_->RegisterVideoSsrc(config_.ssrc);
rtp_packetizer_.reset(new RtpPacketizer(
pacer_.get(), &packet_storage_, config_));
- video_frame_.key_frame = false;
+ video_frame_.dependency = EncodedFrame::DEPENDENT;
video_frame_.frame_id = 0;
- video_frame_.last_referenced_frame_id = kStartFrameId;
+ video_frame_.referenced_frame_id = kStartFrameId;
video_frame_.data.assign(kFrameSize, 123);
video_frame_.rtp_timestamp =
GetVideoRtpTimestamp(testing_clock_.NowTicks());
@@ -130,7 +130,7 @@ class RtpPacketizerTest : public ::testing::Test {
base::SimpleTestTickClock testing_clock_;
scoped_refptr<test::FakeSingleThreadTaskRunner> task_runner_;
- EncodedVideoFrame video_frame_;
+ EncodedFrame video_frame_;
PacketStorage packet_storage_;
RtpPacketizerConfig config_;
scoped_ptr<TestRtpPacketTransport> transport_;
@@ -146,9 +146,9 @@ TEST_F(RtpPacketizerTest, SendStandardPackets) {
transport_->set_expected_number_of_packets(expected_num_of_packets);
transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
- base::TimeTicks time;
- time += base::TimeDelta::FromMilliseconds(kTimestampMs);
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_, time);
+ testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
RunTasks(33 + 1);
EXPECT_EQ(expected_num_of_packets, transport_->number_of_packets_received());
}
@@ -162,8 +162,8 @@ TEST_F(RtpPacketizerTest, Stats) {
transport_->set_rtp_timestamp(video_frame_.rtp_timestamp);
testing_clock_.Advance(base::TimeDelta::FromMilliseconds(kTimestampMs));
- rtp_packetizer_->IncomingEncodedVideoFrame(&video_frame_,
- testing_clock_.NowTicks());
+ video_frame_.reference_time = testing_clock_.NowTicks();
+ rtp_packetizer_->SendFrameAsPackets(video_frame_);
RunTasks(33 + 1);
EXPECT_EQ(expected_num_of_packets, rtp_packetizer_->send_packet_count());
EXPECT_EQ(kFrameSize, rtp_packetizer_->send_octet_count());
diff --git a/media/cast/transport/rtp_sender/rtp_sender.cc b/media/cast/transport/rtp_sender/rtp_sender.cc
index 0963bf7..735d086 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.cc
+++ b/media/cast/transport/rtp_sender/rtp_sender.cc
@@ -55,17 +55,9 @@ bool RtpSender::InitializeVideo(const CastTransportVideoConfig& config) {
return true;
}
-void RtpSender::IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time) {
+void RtpSender::SendFrame(const EncodedFrame& frame) {
DCHECK(packetizer_);
- packetizer_->IncomingEncodedVideoFrame(video_frame, capture_time);
-}
-
-void RtpSender::IncomingEncodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
- DCHECK(packetizer_);
- packetizer_->IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ packetizer_->SendFrameAsPackets(frame);
}
void RtpSender::ResendPackets(
diff --git a/media/cast/transport/rtp_sender/rtp_sender.h b/media/cast/transport/rtp_sender/rtp_sender.h
index c027024..e1fbfe2 100644
--- a/media/cast/transport/rtp_sender/rtp_sender.h
+++ b/media/cast/transport/rtp_sender/rtp_sender.h
@@ -48,13 +48,7 @@ class RtpSender {
// video frames. Returns false if configuration is invalid.
bool InitializeVideo(const CastTransportVideoConfig& config);
- // The video_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedVideoFrame(const EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
-
- // The audio_frame objects ownership is handled by the main cast thread.
- void IncomingEncodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendFrame(const EncodedFrame& frame);
void ResendPackets(const MissingFramesAndPacketsMap& missing_packets);
diff --git a/media/cast/transport/transport_audio_sender.cc b/media/cast/transport/transport_audio_sender.cc
index 423d314..5d06c8b 100644
--- a/media/cast/transport/transport_audio_sender.cc
+++ b/media/cast/transport/transport_audio_sender.cc
@@ -27,26 +27,24 @@ TransportAudioSender::TransportAudioSender(
TransportAudioSender::~TransportAudioSender() {}
-void TransportAudioSender::InsertCodedAudioFrame(
- const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time) {
+void TransportAudioSender::SendFrame(const EncodedFrame& audio_frame) {
if (!initialized_) {
return;
}
if (encryptor_.initialized()) {
- EncodedAudioFrame encrypted_frame;
- if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
+ EncodedFrame encrypted_frame;
+ if (!EncryptAudioFrame(audio_frame, &encrypted_frame)) {
+ NOTREACHED();
return;
}
- rtp_sender_.IncomingEncodedAudioFrame(&encrypted_frame, recorded_time);
+ rtp_sender_.SendFrame(encrypted_frame);
} else {
- rtp_sender_.IncomingEncodedAudioFrame(audio_frame, recorded_time);
+ rtp_sender_.SendFrame(audio_frame);
}
}
bool TransportAudioSender::EncryptAudioFrame(
- const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame) {
+ const EncodedFrame& audio_frame, EncodedFrame* encrypted_frame) {
if (!initialized_) {
return false;
}
@@ -54,9 +52,11 @@ bool TransportAudioSender::EncryptAudioFrame(
audio_frame.frame_id, audio_frame.data, &encrypted_frame->data))
return false;
- encrypted_frame->codec = audio_frame.codec;
+ encrypted_frame->dependency = audio_frame.dependency;
encrypted_frame->frame_id = audio_frame.frame_id;
+ encrypted_frame->referenced_frame_id = audio_frame.referenced_frame_id;
encrypted_frame->rtp_timestamp = audio_frame.rtp_timestamp;
+ encrypted_frame->reference_time = audio_frame.reference_time;
return true;
}
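
With encryption enabled, only |data| is replaced by ciphertext; every other field of the frame is copied across unchanged, and an encryption failure is now treated as NOTREACHED(). A rough standalone sketch of that flow, using toy stand-in types and a fake "encryptor" (byte reversal) purely for illustration:

  #include <string>

  // Toy stand-ins; the real code uses transport::EncodedFrame and
  // TransportEncryptionHandler.
  struct Frame {
    unsigned frame_id = 0;
    std::string data;
  };

  // Fake "encryption" for illustration: reverses the bytes. The frame
  // metadata is copied as-is; only |data| changes.
  bool EncryptFrame(const Frame& in, Frame* out) {
    out->frame_id = in.frame_id;
    out->data.assign(in.data.rbegin(), in.data.rend());
    return true;
  }

  void SendFrame(const Frame& frame, bool encryption_enabled) {
    if (encryption_enabled) {
      Frame encrypted;
      if (!EncryptFrame(frame, &encrypted))
        return;  // the real code treats a failure here as NOTREACHED()
      // rtp_sender.SendFrame(encrypted);
    } else {
      // rtp_sender.SendFrame(frame);
    }
  }

  int main() {
    Frame frame;
    frame.frame_id = 1;
    frame.data = "payload";
    SendFrame(frame, true);
    return 0;
  }
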
diff --git a/media/cast/transport/transport_audio_sender.h b/media/cast/transport/transport_audio_sender.h
index c5bbe07..84780d0 100644
--- a/media/cast/transport/transport_audio_sender.h
+++ b/media/cast/transport/transport_audio_sender.h
@@ -32,8 +32,7 @@ class TransportAudioSender : public base::NonThreadSafe {
// Handles the encoded audio frames to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- void InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
- const base::TimeTicks& recorded_time);
+ void SendFrame(const EncodedFrame& audio_frame);
  // Retransmission request.
void ResendPackets(
@@ -49,8 +48,8 @@ class TransportAudioSender : public base::NonThreadSafe {
// Caller must allocate the destination |encrypted_frame|. The data member
// will be resized to hold the encrypted size.
- bool EncryptAudioFrame(const EncodedAudioFrame& audio_frame,
- EncodedAudioFrame* encrypted_frame);
+ bool EncryptAudioFrame(const EncodedFrame& audio_frame,
+ EncodedFrame* encrypted_frame);
RtpSender rtp_sender_;
TransportEncryptionHandler encryptor_;
diff --git a/media/cast/transport/transport_video_sender.cc b/media/cast/transport/transport_video_sender.cc
index 03de697..1add29b8 100644
--- a/media/cast/transport/transport_video_sender.cc
+++ b/media/cast/transport/transport_video_sender.cc
@@ -27,31 +27,24 @@ TransportVideoSender::TransportVideoSender(
TransportVideoSender::~TransportVideoSender() {}
-void TransportVideoSender::InsertCodedVideoFrame(
- const EncodedVideoFrame* coded_frame,
- const base::TimeTicks& capture_time) {
+void TransportVideoSender::SendFrame(const EncodedFrame& video_frame) {
if (!initialized_) {
return;
}
if (encryptor_.initialized()) {
- EncodedVideoFrame encrypted_video_frame;
-
- if (!EncryptVideoFrame(*coded_frame, &encrypted_video_frame))
+ EncodedFrame encrypted_frame;
+ if (!EncryptVideoFrame(video_frame, &encrypted_frame)) {
+ NOTREACHED();
return;
-
- rtp_sender_.IncomingEncodedVideoFrame(&encrypted_video_frame, capture_time);
+ }
+ rtp_sender_.SendFrame(encrypted_frame);
} else {
- rtp_sender_.IncomingEncodedVideoFrame(coded_frame, capture_time);
- }
- if (coded_frame->key_frame) {
- VLOG(1) << "Send encoded key frame; frame_id:"
- << static_cast<int>(coded_frame->frame_id);
+ rtp_sender_.SendFrame(video_frame);
}
}
bool TransportVideoSender::EncryptVideoFrame(
- const EncodedVideoFrame& video_frame,
- EncodedVideoFrame* encrypted_frame) {
+ const EncodedFrame& video_frame, EncodedFrame* encrypted_frame) {
if (!initialized_) {
return false;
}
@@ -59,11 +52,11 @@ bool TransportVideoSender::EncryptVideoFrame(
video_frame.frame_id, video_frame.data, &(encrypted_frame->data)))
return false;
- encrypted_frame->codec = video_frame.codec;
- encrypted_frame->key_frame = video_frame.key_frame;
+ encrypted_frame->dependency = video_frame.dependency;
encrypted_frame->frame_id = video_frame.frame_id;
- encrypted_frame->last_referenced_frame_id =
- video_frame.last_referenced_frame_id;
+ encrypted_frame->referenced_frame_id = video_frame.referenced_frame_id;
+ encrypted_frame->rtp_timestamp = video_frame.rtp_timestamp;
+ encrypted_frame->reference_time = video_frame.reference_time;
return true;
}
diff --git a/media/cast/transport/transport_video_sender.h b/media/cast/transport/transport_video_sender.h
index 151065b..3025cec 100644
--- a/media/cast/transport/transport_video_sender.h
+++ b/media/cast/transport/transport_video_sender.h
@@ -37,8 +37,7 @@ class TransportVideoSender : public base::NonThreadSafe {
// Handles the encoded video frames to be processed.
// Frames will be encrypted, packetized and transmitted to the network.
- void InsertCodedVideoFrame(const EncodedVideoFrame* coded_frame,
- const base::TimeTicks& capture_time);
+ void SendFrame(const EncodedFrame& video_frame);
  // Retransmission request.
void ResendPackets(
@@ -52,8 +51,8 @@ class TransportVideoSender : public base::NonThreadSafe {
private:
  // Caller must allocate the destination |encrypted_video_frame|. The data
// member will be resized to hold the encrypted size.
- bool EncryptVideoFrame(const EncodedVideoFrame& encoded_frame,
- EncodedVideoFrame* encrypted_video_frame);
+ bool EncryptVideoFrame(const EncodedFrame& encoded_frame,
+ EncodedFrame* encrypted_video_frame);
const base::TimeDelta rtp_max_delay_;
TransportEncryptionHandler encryptor_;
diff --git a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h b/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
deleted file mode 100644
index 62ee8ad..0000000
--- a/media/cast/video_receiver/codecs/vp8/vp8_decoder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-#define MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
-#include "media/cast/cast_receiver.h"
-#include "media/cast/video_receiver/software_video_decoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-
-typedef struct vpx_codec_ctx vpx_dec_ctx_t;
-
-// TODO(mikhal): Look into reusing VpxVideoDecoder.
-namespace media {
-namespace cast {
-
-class Vp8Decoder : public SoftwareVideoDecoder {
- public:
- explicit Vp8Decoder(scoped_refptr<CastEnvironment> cast_environment);
- virtual ~Vp8Decoder();
-
- // SoftwareVideoDecoder implementations.
- virtual bool Decode(const transport::EncodedVideoFrame* encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_cb)
- OVERRIDE;
-
- private:
- // Initialize the decoder.
- void InitDecoder();
- scoped_ptr<vpx_dec_ctx_t> decoder_;
- scoped_refptr<CastEnvironment> cast_environment_;
-
- DISALLOW_COPY_AND_ASSIGN(Vp8Decoder);
-};
-
-} // namespace cast
-} // namespace media
-
-#endif // MEDIA_CAST_VIDEO_RECEVIER_CODECS_VP8_VP8_DECODER_H_
diff --git a/media/cast/video_receiver/video_decoder.cc b/media/cast/video_receiver/video_decoder.cc
index 9f16749..07a11ad 100644
--- a/media/cast/video_receiver/video_decoder.cc
+++ b/media/cast/video_receiver/video_decoder.cc
@@ -9,7 +9,6 @@
#include "base/json/json_reader.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/stl_util.h"
#include "base/values.h"
#include "media/base/video_util.h"
#include "media/cast/cast_defines.h"
@@ -41,18 +40,10 @@ class VideoDecoder::ImplBase
return cast_initialization_status_;
}
- void DecodeFrame(scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK_EQ(cast_initialization_status_, STATUS_VIDEO_INITIALIZED);
- if (encoded_frame->codec != codec_) {
- NOTREACHED();
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback, scoped_refptr<VideoFrame>(NULL), false));
- }
-
COMPILE_ASSERT(sizeof(encoded_frame->frame_id) == sizeof(last_frame_id_),
size_of_frame_id_types_do_not_match);
bool is_continuous = true;
@@ -68,7 +59,7 @@ class VideoDecoder::ImplBase
last_frame_id_ = encoded_frame->frame_id;
const scoped_refptr<VideoFrame> decoded_frame = Decode(
- reinterpret_cast<uint8*>(string_as_array(&encoded_frame->data)),
+ encoded_frame->mutable_bytes(),
static_cast<int>(encoded_frame->data.size()));
cast_environment_->PostTask(
CastEnvironment::MAIN,
@@ -248,7 +239,7 @@ CastInitializationStatus VideoDecoder::InitializationResult() const {
}
void VideoDecoder::DecodeFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback) {
DCHECK(encoded_frame.get());
DCHECK(!callback.is_null());
diff --git a/media/cast/video_receiver/video_decoder.h b/media/cast/video_receiver/video_decoder.h
index 7f0db54..2831d23 100644
--- a/media/cast/video_receiver/video_decoder.h
+++ b/media/cast/video_receiver/video_decoder.h
@@ -43,7 +43,7 @@ class VideoDecoder {
// monotonically-increasing by 1 for each successive call to this method.
// When it is not, the decoder will assume one or more frames have been
// dropped (e.g., due to packet loss), and will perform recovery actions.
- void DecodeFrame(scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
+ void DecodeFrame(scoped_ptr<transport::EncodedFrame> encoded_frame,
const DecodeFrameCallback& callback);
private:
diff --git a/media/cast/video_receiver/video_decoder_unittest.cc b/media/cast/video_receiver/video_decoder_unittest.cc
index aa6b7ac..4d731ea 100644
--- a/media/cast/video_receiver/video_decoder_unittest.cc
+++ b/media/cast/video_receiver/video_decoder_unittest.cc
@@ -61,10 +61,10 @@ class VideoDecoderTest
total_video_frames_decoded_ = 0;
}
- // Called from the unit test thread to create another EncodedVideoFrame and
- // push it into the decoding pipeline.
+ // Called from the unit test thread to create another EncodedFrame and push it
+ // into the decoding pipeline.
void FeedMoreVideo(int num_dropped_frames) {
- // Prepare a simulated EncodedVideoFrame to feed into the VideoDecoder.
+ // Prepare a simulated EncodedFrame to feed into the VideoDecoder.
const gfx::Size frame_size(kWidth, kHeight);
const scoped_refptr<VideoFrame> video_frame =
@@ -77,11 +77,10 @@ class VideoDecoderTest
PopulateVideoFrame(video_frame, 0);
// Encode |frame| into |encoded_frame->data|.
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
CHECK_EQ(transport::kVp8, GetParam()); // Only support VP8 test currently.
vp8_encoder_.Encode(video_frame, encoded_frame.get());
- encoded_frame->codec = GetParam();
encoded_frame->frame_id = last_frame_id_ + 1 + num_dropped_frames;
last_frame_id_ = encoded_frame->frame_id;
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index 4af2c53..df17a53 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -94,15 +94,16 @@ void VideoReceiver::GetRawVideoFrame(
void VideoReceiver::DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!encoded_frame) {
- callback.Run(make_scoped_refptr<VideoFrame>(NULL), playout_time, false);
+ callback.Run(
+ make_scoped_refptr<VideoFrame>(NULL), base::TimeTicks(), false);
return;
}
const uint32 frame_id = encoded_frame->frame_id;
const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ const base::TimeTicks playout_time = encoded_frame->reference_time;
video_decoder_->DecodeFrame(encoded_frame.Pass(),
base::Bind(&VideoReceiver::EmitRawVideoFrame,
cast_environment_,
@@ -138,8 +139,7 @@ void VideoReceiver::EmitRawVideoFrame(
callback.Run(video_frame, playout_time, is_continuous);
}
-void VideoReceiver::GetEncodedVideoFrame(
- const VideoFrameEncodedCallback& callback) {
+void VideoReceiver::GetEncodedVideoFrame(const FrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
frame_request_queue_.push_back(callback);
EmitAvailableEncodedFrames();
@@ -152,8 +152,8 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
bool is_consecutively_next_frame = false;
if (!framer_.GetEncodedVideoFrame(encoded_frame.get(),
&is_consecutively_next_frame)) {
@@ -201,8 +201,8 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
encoded_frame->data.swap(decrypted_video_data);
}
- // At this point, we have a decrypted EncodedVideoFrame ready to be emitted.
- encoded_frame->codec = codec_;
+ // At this point, we have a decrypted EncodedFrame ready to be emitted.
+ encoded_frame->reference_time = playout_time;
framer_.ReleaseFrame(encoded_frame->frame_id);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT2(
@@ -214,8 +214,7 @@ void VideoReceiver::EmitAvailableEncodedFrames() {
cast_environment_->PostTask(CastEnvironment::MAIN,
FROM_HERE,
base::Bind(frame_request_queue_.front(),
- base::Passed(&encoded_frame),
- playout_time));
+ base::Passed(&encoded_frame)));
frame_request_queue_.pop_front();
}
}
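
On the receive side the playout time now rides inside EncodedFrame::reference_time, so the frame-delivery callback shrinks to a single argument. A sketch of that shape with stand-in types (std::function and std::unique_ptr in place of base::Callback and scoped_ptr, microseconds in place of base::TimeTicks):

  #include <functional>
  #include <memory>
  #include <string>

  // Simplified stand-in for transport::EncodedFrame.
  struct EncodedFrame {
    unsigned frame_id = 0;
    long long reference_time_us = 0;  // playout time, set by the receiver
    std::string data;
  };

  using FrameEncodedCallback =
      std::function<void(std::unique_ptr<EncodedFrame>)>;

  int main() {
    // The consumer reads the playout time straight off the frame instead of
    // receiving it as a second callback argument.
    FrameEncodedCallback callback = [](std::unique_ptr<EncodedFrame> frame) {
      long long playout_time_us = frame->reference_time_us;
      (void)playout_time_us;
    };

    std::unique_ptr<EncodedFrame> frame(new EncodedFrame());
    frame->reference_time_us = 1234567;  // what used to be |playout_time|
    callback(std::move(frame));
    return 0;
  }
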
diff --git a/media/cast/video_receiver/video_receiver.h b/media/cast/video_receiver/video_receiver.h
index 697657e..ff7ccda 100644
--- a/media/cast/video_receiver/video_receiver.h
+++ b/media/cast/video_receiver/video_receiver.h
@@ -76,7 +76,7 @@ class VideoReceiver : public RtpReceiver,
//
// The given |callback| is guaranteed to be run at some point in the future,
// even if to respond with NULL at shutdown time.
- void GetEncodedVideoFrame(const VideoFrameEncodedCallback& callback);
+ void GetEncodedVideoFrame(const FrameEncodedCallback& callback);
// Deliver another packet, possibly a duplicate, and possibly out-of-order.
void IncomingPacket(scoped_ptr<Packet> packet);
@@ -102,12 +102,11 @@ class VideoReceiver : public RtpReceiver,
// EmitAvailableEncodedFrames().
void EmitAvailableEncodedFramesAfterWaiting();
- // Feeds an EncodedVideoFrame into |video_decoder_|. GetRawVideoFrame() uses
- // this as a callback for GetEncodedVideoFrame().
+ // Feeds an EncodedFrame into |video_decoder_|. GetRawVideoFrame() uses this
+ // as a callback for GetEncodedVideoFrame().
void DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& playout_time);
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
// Return the playout time based on the current time and rtp timestamp.
base::TimeTicks GetPlayoutTime(base::TimeTicks now, uint32 rtp_timestamp);
@@ -160,7 +159,7 @@ class VideoReceiver : public RtpReceiver,
transport::TransportEncryptionHandler decryptor_;
// Outstanding callbacks to run to deliver on client requests for frames.
- std::list<VideoFrameEncodedCallback> frame_request_queue_;
+ std::list<FrameEncodedCallback> frame_request_queue_;
// True while there's an outstanding task to re-invoke
// EmitAvailableEncodedFrames().
diff --git a/media/cast/video_receiver/video_receiver_unittest.cc b/media/cast/video_receiver/video_receiver_unittest.cc
index e9d8bbd..4cc7dd2 100644
--- a/media/cast/video_receiver/video_receiver_unittest.cc
+++ b/media/cast/video_receiver/video_receiver_unittest.cc
@@ -37,13 +37,11 @@ class FakeVideoClient {
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> video_frame,
- const base::TimeTicks& playout_time) {
+ scoped_ptr<transport::EncodedFrame> video_frame) {
ASSERT_FALSE(!video_frame)
<< "If at shutdown: There were unsatisfied requests enqueued.";
EXPECT_EQ(expected_frame_id_, video_frame->frame_id);
- EXPECT_EQ(transport::kVp8, video_frame->codec);
- EXPECT_EQ(expected_playout_time_, playout_time);
+ EXPECT_EQ(expected_playout_time_, video_frame->reference_time);
++num_called_;
}
@@ -156,7 +154,7 @@ TEST_F(VideoReceiverTest, MultiplePendingGetCalls) {
.WillRepeatedly(testing::Return(true));
  // Enqueue a request for a video frame.
- const VideoFrameEncodedCallback frame_encoded_callback =
+ const FrameEncodedCallback frame_encoded_callback =
base::Bind(&FakeVideoClient::DeliverEncodedVideoFrame,
base::Unretained(&fake_video_client_));
receiver_->GetEncodedVideoFrame(frame_encoded_callback);
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
index 38c7dfc..4905d34 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.cc
@@ -135,7 +135,7 @@ void Vp8Encoder::InitEncode(int number_of_encode_threads) {
}
bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) {
+ transport::EncodedFrame* encoded_image) {
DCHECK(thread_checker_.CalledOnValidThread());
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
@@ -199,35 +199,34 @@ bool Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// Get encoded frame.
const vpx_codec_cx_pkt_t* pkt = NULL;
vpx_codec_iter_t iter = NULL;
- size_t total_size = 0;
+ bool is_key_frame = false;
while ((pkt = vpx_codec_get_cx_data(encoder_.get(), &iter)) != NULL) {
- if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
- total_size += pkt->data.frame.sz;
- encoded_image->data.reserve(total_size);
- encoded_image->data.insert(
- encoded_image->data.end(),
- static_cast<const uint8*>(pkt->data.frame.buf),
- static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- encoded_image->key_frame = true;
- } else {
- encoded_image->key_frame = false;
- }
- }
+ if (pkt->kind != VPX_CODEC_CX_FRAME_PKT)
+ continue;
+ encoded_image->data.assign(
+ static_cast<const uint8*>(pkt->data.frame.buf),
+ static_cast<const uint8*>(pkt->data.frame.buf) + pkt->data.frame.sz);
+ is_key_frame = !!(pkt->data.frame.flags & VPX_FRAME_IS_KEY);
+ break; // Done, since all data is provided in one CX_FRAME_PKT packet.
}
// Don't update frame_id for zero size frames.
- if (total_size == 0)
+ if (encoded_image->data.empty())
return true;
// Populate the encoded frame.
- encoded_image->codec = transport::kVp8;
- encoded_image->last_referenced_frame_id = latest_frame_id_to_reference;
encoded_image->frame_id = ++last_encoded_frame_id_;
+ if (is_key_frame) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = latest_frame_id_to_reference;
+ }
- VLOG(1) << "VP8 encoded frame:" << static_cast<int>(encoded_image->frame_id)
- << " sized:" << total_size;
+ DVLOG(1) << "VP8 encoded frame_id " << encoded_image->frame_id
+ << ", sized:" << encoded_image->data.size();
- if (encoded_image->key_frame) {
+ if (is_key_frame) {
key_frame_requested_ = false;
for (int i = 0; i < kNumberOfVp8VideoBuffers; ++i) {
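
The encoder replaces the key_frame bool with the dependency/referenced_frame_id pair: a key frame references itself, while a dependent frame references the latest acked frame chosen by the sender. A small self-contained sketch of that bookkeeping (stand-in enum and struct, not the Chromium types):

  #include <cassert>
  #include <cstdint>

  enum class Dependency { Key, Dependent };  // stand-in for the new enum

  struct FrameIds {
    Dependency dependency;
    uint32_t frame_id;
    uint32_t referenced_frame_id;
  };

  FrameIds AssignIds(bool is_key_frame,
                     uint32_t new_frame_id,
                     uint32_t latest_frame_id_to_reference) {
    FrameIds ids;
    ids.frame_id = new_frame_id;
    if (is_key_frame) {
      ids.dependency = Dependency::Key;
      ids.referenced_frame_id = new_frame_id;  // key frames are self-referencing
    } else {
      ids.dependency = Dependency::Dependent;
      ids.referenced_frame_id = latest_frame_id_to_reference;
    }
    return ids;
  }

  int main() {
    assert(AssignIds(true, 0, 0).referenced_frame_id == 0);   // key frame
    assert(AssignIds(false, 5, 3).referenced_frame_id == 3);  // depends on 3
    return 0;
  }
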
diff --git a/media/cast/video_sender/codecs/vp8/vp8_encoder.h b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
index aff6215..82ef2c2 100644
--- a/media/cast/video_sender/codecs/vp8/vp8_encoder.h
+++ b/media/cast/video_sender/codecs/vp8/vp8_encoder.h
@@ -37,7 +37,7 @@ class Vp8Encoder : public SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) OVERRIDE;
+ transport::EncodedFrame* encoded_image) OVERRIDE;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
diff --git a/media/cast/video_sender/external_video_encoder.cc b/media/cast/video_sender/external_video_encoder.cc
index 627b723..d2e3065 100644
--- a/media/cast/video_sender/external_video_encoder.cc
+++ b/media/cast/video_sender/external_video_encoder.cc
@@ -222,21 +222,19 @@ class LocalVideoEncodeAcceleratorClient
stream_header_.append(static_cast<const char*>(output_buffer->memory()),
payload_size);
} else if (!encoded_frame_data_storage_.empty()) {
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
-
- encoded_frame->codec = codec_;
- encoded_frame->key_frame = key_frame;
- encoded_frame->last_referenced_frame_id = last_encoded_frame_id_;
- last_encoded_frame_id_++;
- encoded_frame->frame_id = last_encoded_frame_id_;
- encoded_frame->rtp_timestamp = GetVideoRtpTimestamp(
- encoded_frame_data_storage_.front().capture_time);
- if (key_frame) {
- // Self referenced.
- encoded_frame->last_referenced_frame_id = encoded_frame->frame_id;
- }
-
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ encoded_frame->dependency = key_frame ? transport::EncodedFrame::KEY :
+ transport::EncodedFrame::DEPENDENT;
+ encoded_frame->frame_id = ++last_encoded_frame_id_;
+ if (key_frame)
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id;
+ else
+ encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
+ encoded_frame->reference_time =
+ encoded_frame_data_storage_.front().capture_time;
+ encoded_frame->rtp_timestamp =
+ GetVideoRtpTimestamp(encoded_frame->reference_time);
if (!stream_header_.empty()) {
encoded_frame->data = stream_header_;
stream_header_.clear();
@@ -257,8 +255,7 @@ class LocalVideoEncodeAcceleratorClient
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(encoded_frame_data_storage_.front().frame_encoded_callback,
- base::Passed(&encoded_frame),
- encoded_frame_data_storage_.front().capture_time));
+ base::Passed(&encoded_frame)));
encoded_frame_data_storage_.pop_front();
} else {
diff --git a/media/cast/video_sender/external_video_encoder_unittest.cc b/media/cast/video_sender/external_video_encoder_unittest.cc
index 1f2e4dd0..20c9756 100644
--- a/media/cast/video_sender/external_video_encoder_unittest.cc
+++ b/media/cast/video_sender/external_video_encoder_unittest.cc
@@ -45,24 +45,26 @@ class TestVideoEncoderCallback
public:
TestVideoEncoderCallback() {}
- void SetExpectedResult(bool expected_key_frame,
- uint8 expected_frame_id,
- uint8 expected_last_referenced_frame_id,
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
const base::TimeTicks& expected_capture_time) {
- expected_key_frame_ = expected_key_frame;
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
expected_capture_time_ = expected_capture_time;
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
- EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->last_referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, capture_time);
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
}
protected:
@@ -72,8 +74,8 @@ class TestVideoEncoderCallback
friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
bool expected_key_frame_;
- uint8 expected_frame_id_;
- uint8 expected_last_referenced_frame_id_;
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
@@ -145,15 +147,14 @@ TEST_F(ExternalVideoEncoderTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
for (int i = 0; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(
- false, i + 1, i, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -172,7 +173,7 @@ TEST_F(ExternalVideoEncoderTest, SkipNextFrame) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -188,8 +189,7 @@ TEST_F(ExternalVideoEncoderTest, SkipNextFrame) {
video_encoder_->SkipNextFrame(false);
for (int i = 0; i < 2; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(
- false, i + 1, i, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i + 1, i, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -212,7 +212,7 @@ TEST_F(ExternalVideoEncoderTest, StreamHeader) {
// Verify the first returned bitstream buffer is still a key frame.
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
diff --git a/media/cast/video_sender/fake_software_video_encoder.cc b/media/cast/video_sender/fake_software_video_encoder.cc
index 3afd858..01a3cce 100644
--- a/media/cast/video_sender/fake_software_video_encoder.cc
+++ b/media/cast/video_sender/fake_software_video_encoder.cc
@@ -25,18 +25,23 @@ void FakeSoftwareVideoEncoder::Initialize() {}
bool FakeSoftwareVideoEncoder::Encode(
const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) {
- encoded_image->codec = transport::kFakeSoftwareVideo;
- encoded_image->key_frame = next_frame_is_key_;
- next_frame_is_key_ = false;
+ transport::EncodedFrame* encoded_image) {
encoded_image->frame_id = frame_id_++;
- encoded_image->last_referenced_frame_id = encoded_image->frame_id - 1;
+ if (next_frame_is_key_) {
+ encoded_image->dependency = transport::EncodedFrame::KEY;
+ encoded_image->referenced_frame_id = encoded_image->frame_id;
+ next_frame_is_key_ = false;
+ } else {
+ encoded_image->dependency = transport::EncodedFrame::DEPENDENT;
+ encoded_image->referenced_frame_id = encoded_image->frame_id - 1;
+ }
base::DictionaryValue values;
- values.Set("key", base::Value::CreateBooleanValue(encoded_image->key_frame));
+ values.Set("key", base::Value::CreateBooleanValue(
+ encoded_image->dependency == transport::EncodedFrame::KEY));
values.Set("id", base::Value::CreateIntegerValue(encoded_image->frame_id));
values.Set("ref", base::Value::CreateIntegerValue(
- encoded_image->last_referenced_frame_id));
+ encoded_image->referenced_frame_id));
base::JSONWriter::Write(&values, &encoded_image->data);
return true;
}
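
The fake encoder now derives the "key" flag from the dependency enum rather than a separate bool, while still emitting the same three JSON fields into the frame's data. A sketch of that payload built with plain snprintf instead of base::DictionaryValue/base::JSONWriter (field order here is illustrative and may differ from the real serializer's output):

  #include <cstdio>
  #include <string>

  // Builds a payload with the same three fields the fake encoder writes into
  // EncodedFrame::data (illustrative only).
  std::string FakePayload(bool is_key, unsigned frame_id, unsigned ref_id) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "{\"key\":%s,\"id\":%u,\"ref\":%u}",
                  is_key ? "true" : "false", frame_id, ref_id);
    return std::string(buf);
  }

  int main() {
    std::printf("%s\n", FakePayload(true, 0, 0).c_str());   // key frame 0, self-referencing
    std::printf("%s\n", FakePayload(false, 1, 0).c_str());  // frame 1 depends on frame 0
    return 0;
  }
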
diff --git a/media/cast/video_sender/fake_software_video_encoder.h b/media/cast/video_sender/fake_software_video_encoder.h
index bcc5ed0..03d83c2 100644
--- a/media/cast/video_sender/fake_software_video_encoder.h
+++ b/media/cast/video_sender/fake_software_video_encoder.h
@@ -18,7 +18,7 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
// SoftwareVideoEncoder implementations.
virtual void Initialize() OVERRIDE;
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) OVERRIDE;
+ transport::EncodedFrame* encoded_image) OVERRIDE;
virtual void UpdateRates(uint32 new_bitrate) OVERRIDE;
virtual void GenerateKeyFrame() OVERRIDE;
virtual void LatestFrameIdToReference(uint32 frame_id) OVERRIDE;
diff --git a/media/cast/video_sender/software_video_encoder.h b/media/cast/video_sender/software_video_encoder.h
index 3d63f20..f1bf6f6 100644
--- a/media/cast/video_sender/software_video_encoder.h
+++ b/media/cast/video_sender/software_video_encoder.h
@@ -15,7 +15,7 @@ class VideoFrame;
namespace media {
namespace cast {
namespace transport {
-struct EncodedVideoFrame;
+struct EncodedFrame;
} // namespace transport
class SoftwareVideoEncoder {
@@ -28,7 +28,7 @@ class SoftwareVideoEncoder {
// Encode a raw image (as a part of a video stream).
virtual bool Encode(const scoped_refptr<media::VideoFrame>& video_frame,
- transport::EncodedVideoFrame* encoded_image) = 0;
+ transport::EncodedFrame* encoded_image) = 0;
// Update the encoder with a new target bit rate.
virtual void UpdateRates(uint32 new_bitrate) = 0;
diff --git a/media/cast/video_sender/video_encoder.h b/media/cast/video_sender/video_encoder.h
index 09a5b29..48d63ab 100644
--- a/media/cast/video_sender/video_encoder.h
+++ b/media/cast/video_sender/video_encoder.h
@@ -20,8 +20,8 @@ namespace cast {
// All these functions are called from the main cast thread.
class VideoEncoder {
public:
- typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
virtual ~VideoEncoder() {}
diff --git a/media/cast/video_sender/video_encoder_impl.cc b/media/cast/video_sender/video_encoder_impl.cc
index 2eceda3..7f8f1e4 100644
--- a/media/cast/video_sender/video_encoder_impl.cc
+++ b/media/cast/video_sender/video_encoder_impl.cc
@@ -43,25 +43,24 @@ void EncodeVideoFrameOnEncoderThread(
dynamic_config.latest_frame_id_to_reference);
encoder->UpdateRates(dynamic_config.bit_rate);
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
- bool retval = encoder->Encode(video_frame, encoded_frame.get());
-
- encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
-
- if (!retval) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame(
+ new transport::EncodedFrame());
+ if (!encoder->Encode(video_frame, encoded_frame.get())) {
VLOG(1) << "Encoding failed";
return;
}
- if (encoded_frame->data.size() <= 0) {
+ if (encoded_frame->data.empty()) {
VLOG(1) << "Encoding resulted in an empty frame";
return;
}
+ encoded_frame->rtp_timestamp = transport::GetVideoRtpTimestamp(capture_time);
+ encoded_frame->reference_time = capture_time;
+
environment->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
base::Bind(
- frame_encoded_callback, base::Passed(&encoded_frame), capture_time));
+ frame_encoded_callback, base::Passed(&encoded_frame)));
}
} // namespace
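
The encoder thread now stamps both rtp_timestamp and reference_time on the frame before posting the single-argument callback. A tiny sketch of the capture-time-to-RTP conversion, assuming the conventional 90 kHz video RTP clock and microsecond inputs (the real GetVideoRtpTimestamp() operates on base::TimeTicks):

  #include <cstdint>
  #include <cstdio>

  // Hypothetical stand-in for transport::GetVideoRtpTimestamp(): converts a
  // microsecond capture time to 90 kHz RTP ticks (90 ticks per millisecond).
  uint32_t ToVideoRtpTimestamp(int64_t capture_time_us) {
    return static_cast<uint32_t>(capture_time_us * 90 / 1000);
  }

  int main() {
    // One ~33 ms frame interval at 30 fps advances the RTP clock by 2970 ticks.
    std::printf("%u\n", ToVideoRtpTimestamp(33000) - ToVideoRtpTimestamp(0));
    return 0;
  }
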
diff --git a/media/cast/video_sender/video_encoder_impl.h b/media/cast/video_sender/video_encoder_impl.h
index 4bc0a83..47265c1 100644
--- a/media/cast/video_sender/video_encoder_impl.h
+++ b/media/cast/video_sender/video_encoder_impl.h
@@ -26,8 +26,8 @@ class VideoEncoderImpl : public VideoEncoder {
int bit_rate;
};
- typedef base::Callback<void(scoped_ptr<transport::EncodedVideoFrame>,
- const base::TimeTicks&)> FrameEncodedCallback;
+ typedef base::Callback<void(scoped_ptr<transport::EncodedFrame>)>
+ FrameEncodedCallback;
VideoEncoderImpl(scoped_refptr<CastEnvironment> cast_environment,
const VideoSenderConfig& video_config,
diff --git a/media/cast/video_sender/video_encoder_impl_unittest.cc b/media/cast/video_sender/video_encoder_impl_unittest.cc
index 0577239..b1a5cb8 100644
--- a/media/cast/video_sender/video_encoder_impl_unittest.cc
+++ b/media/cast/video_sender/video_encoder_impl_unittest.cc
@@ -26,24 +26,26 @@ class TestVideoEncoderCallback
public:
TestVideoEncoderCallback() {}
- void SetExpectedResult(bool expected_key_frame,
- uint8 expected_frame_id,
- uint8 expected_last_referenced_frame_id,
+ void SetExpectedResult(uint32 expected_frame_id,
+ uint32 expected_last_referenced_frame_id,
const base::TimeTicks& expected_capture_time) {
- expected_key_frame_ = expected_key_frame;
expected_frame_id_ = expected_frame_id;
expected_last_referenced_frame_id_ = expected_last_referenced_frame_id;
expected_capture_time_ = expected_capture_time;
}
void DeliverEncodedVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
- EXPECT_EQ(expected_key_frame_, encoded_frame->key_frame);
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
+ if (expected_frame_id_ == expected_last_referenced_frame_id_) {
+ EXPECT_EQ(transport::EncodedFrame::KEY, encoded_frame->dependency);
+ } else {
+ EXPECT_EQ(transport::EncodedFrame::DEPENDENT,
+ encoded_frame->dependency);
+ }
EXPECT_EQ(expected_frame_id_, encoded_frame->frame_id);
EXPECT_EQ(expected_last_referenced_frame_id_,
- encoded_frame->last_referenced_frame_id);
- EXPECT_EQ(expected_capture_time_, capture_time);
+ encoded_frame->referenced_frame_id);
+ EXPECT_EQ(expected_capture_time_, encoded_frame->reference_time);
}
protected:
@@ -52,9 +54,8 @@ class TestVideoEncoderCallback
private:
friend class base::RefCountedThreadSafe<TestVideoEncoderCallback>;
- bool expected_key_frame_;
- uint8 expected_frame_id_;
- uint8 expected_last_referenced_frame_id_;
+ uint32 expected_frame_id_;
+ uint32 expected_last_referenced_frame_id_;
base::TimeTicks expected_capture_time_;
DISALLOW_COPY_AND_ASSIGN(TestVideoEncoderCallback);
@@ -128,21 +129,21 @@ TEST_F(VideoEncoderImplTest, EncodePattern30fpsRunningOutOfAck) {
base::TimeTicks capture_time;
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_->LatestFrameIdToReference(0);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
capture_time += base::TimeDelta::FromMilliseconds(33);
video_encoder_->LatestFrameIdToReference(1);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 1, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 1, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -151,7 +152,7 @@ TEST_F(VideoEncoderImplTest, EncodePattern30fpsRunningOutOfAck) {
for (int i = 3; i < 6; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -170,21 +171,21 @@ TEST_F(VideoEncoderImplTest, DISABLED_EncodePattern60fpsRunningOutOfAck) {
test_video_encoder_callback_.get());
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -193,7 +194,7 @@ TEST_F(VideoEncoderImplTest, DISABLED_EncodePattern60fpsRunningOutOfAck) {
for (int i = 3; i < 9; ++i) {
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, i, 2, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 2, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -212,35 +213,35 @@ TEST_F(VideoEncoderImplTest,
test_video_encoder_callback_.get());
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(true, 0, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(0, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(0);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 1, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(1, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(1);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 2, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(2, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(2);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 3, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(3, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
video_encoder_->LatestFrameIdToReference(3);
capture_time += base::TimeDelta::FromMilliseconds(33);
- test_video_encoder_callback_->SetExpectedResult(false, 4, 0, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(4, 0, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
@@ -248,7 +249,7 @@ TEST_F(VideoEncoderImplTest,
video_encoder_->LatestFrameIdToReference(4);
for (int i = 5; i < 17; ++i) {
- test_video_encoder_callback_->SetExpectedResult(false, i, 4, capture_time);
+ test_video_encoder_callback_->SetExpectedResult(i, 4, capture_time);
EXPECT_TRUE(video_encoder_->EncodeVideoFrame(
video_frame_, capture_time, frame_encoded_callback));
task_runner_->RunTasks();
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
index 5a15463..bd2c013 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/video_sender/video_sender.cc
@@ -147,7 +147,7 @@ void VideoSender::InsertRawVideoFrame(
"cast_perf_test", "InsertRawVideoFrame",
TRACE_EVENT_SCOPE_THREAD,
"timestamp", capture_time.ToInternalValue(),
- "rtp_timestamp", GetVideoRtpTimestamp(capture_time));
+ "rtp_timestamp", rtp_timestamp);
if (video_encoder_->EncodeVideoFrame(
video_frame,
@@ -162,14 +162,11 @@ void VideoSender::InsertRawVideoFrame(
void VideoSender::SendEncodedVideoFrameMainThread(
int requested_bitrate_before_encode,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time) {
+ scoped_ptr<transport::EncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
last_send_time_ = cast_environment_->Clock()->NowTicks();
- if (encoded_frame->key_frame) {
- VLOG(1) << "Send encoded key frame; frame_id:"
- << static_cast<int>(encoded_frame->frame_id);
- }
+ VLOG_IF(1, encoded_frame->dependency == transport::EncodedFrame::KEY)
+ << "Send encoded key frame; frame_id: " << encoded_frame->frame_id;
DCHECK_GT(frames_in_encoder_, 0);
frames_in_encoder_--;
@@ -177,22 +174,23 @@ void VideoSender::SendEncodedVideoFrameMainThread(
cast_environment_->Logging()->InsertEncodedFrameEvent(
last_send_time_, FRAME_ENCODED, VIDEO_EVENT, encoded_frame->rtp_timestamp,
frame_id, static_cast<int>(encoded_frame->data.size()),
- encoded_frame->key_frame,
+ encoded_frame->dependency == transport::EncodedFrame::KEY,
requested_bitrate_before_encode);
// Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
TRACE_EVENT_INSTANT1(
"cast_perf_test", "VideoFrameEncoded",
TRACE_EVENT_SCOPE_THREAD,
- "rtp_timestamp", GetVideoRtpTimestamp(capture_time));
+ "rtp_timestamp", encoded_frame->rtp_timestamp);
// Only use lowest 8 bits as key.
frame_id_to_rtp_timestamp_[frame_id & 0xff] = encoded_frame->rtp_timestamp;
last_sent_frame_id_ = static_cast<int>(encoded_frame->frame_id);
- rtp_timestamp_helper_.StoreLatestTime(capture_time,
+ DCHECK(!encoded_frame->reference_time.is_null());
+ rtp_timestamp_helper_.StoreLatestTime(encoded_frame->reference_time,
encoded_frame->rtp_timestamp);
- transport_sender_->InsertCodedVideoFrame(encoded_frame.get(), capture_time);
+ transport_sender_->InsertCodedVideoFrame(*encoded_frame);
UpdateFramesInFlight();
InitializeTimers();
}
diff --git a/media/cast/video_sender/video_sender.h b/media/cast/video_sender/video_sender.h
index d0bb370..23b4857 100644
--- a/media/cast/video_sender/video_sender.h
+++ b/media/cast/video_sender/video_sender.h
@@ -85,16 +85,13 @@ class VideoSender : public base::NonThreadSafe,
void ScheduleNextSkippedFramesCheck();
void SkippedFramesCheck();
- void SendEncodedVideoFrame(const transport::EncodedVideoFrame* video_frame,
- const base::TimeTicks& capture_time);
void ResendFrame(uint32 resend_frame_id);
void ReceivedAck(uint32 acked_frame_id);
void UpdateFramesInFlight();
void SendEncodedVideoFrameMainThread(
int requested_bitrate_before_encode,
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& capture_time);
+ scoped_ptr<transport::EncodedFrame> encoded_frame);
void InitializeTimers();
diff --git a/media/cast/video_sender/video_sender_unittest.cc b/media/cast/video_sender/video_sender_unittest.cc
index 63d9f83..c68fb70 100644
--- a/media/cast/video_sender/video_sender_unittest.cc
+++ b/media/cast/video_sender/video_sender_unittest.cc
@@ -210,7 +210,7 @@ TEST_F(VideoSenderTest, BuiltInEncoder) {
InitEncoder(false);
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
@@ -225,7 +225,7 @@ TEST_F(VideoSenderTest, ExternalEncoder) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
task_runner_->RunTasks();
@@ -240,7 +240,7 @@ TEST_F(VideoSenderTest, RtcpTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time = testing_clock_->NowTicks();
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
// Make sure that we send at least one RTCP packet.
@@ -265,7 +265,7 @@ TEST_F(VideoSenderTest, ResendTimer) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
// ACK the key frame.
@@ -297,7 +297,7 @@ TEST_F(VideoSenderTest, LogAckReceivedEvent) {
for (int i = 0; i < num_frames; i++) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
+ const base::TimeTicks capture_time = testing_clock_->NowTicks();
video_sender_->InsertRawVideoFrame(video_frame, capture_time);
RunTasks(33);
}
@@ -326,15 +326,13 @@ TEST_F(VideoSenderTest, StopSendingIntheAbsenceOfAck) {
// than 4 frames in flight.
  // Store size in packets of frame 0, as it should be resent due to timeout.
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
const int size_of_frame0 = transport_.number_of_rtp_packets();
for (int i = 1; i < 4; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
}
@@ -343,8 +341,7 @@ TEST_F(VideoSenderTest, StopSendingIntheAbsenceOfAck) {
// received any acks.
for (int i = 0; i < 3; ++i) {
scoped_refptr<media::VideoFrame> video_frame = GetNewVideoFrame();
- base::TimeTicks capture_time;
- video_sender_->InsertRawVideoFrame(video_frame, capture_time);
+ video_sender_->InsertRawVideoFrame(video_frame, testing_clock_->NowTicks());
RunTasks(33);
}