author     mikhal@google.com <mikhal@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-22 06:48:26 +0000
committer  mikhal@google.com <mikhal@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2013-11-22 06:48:26 +0000
commit     331d9fcd308cc44a1a632f9e8fd76f5ebf2ede7c (patch)
tree       e3563e45ab713faadf81fcac24e8c35930c032e2 /media
parent     6caaacfec964079f6998696334fa62b68facbdba (diff)
Making cast run on multiple threads
1. Switching from weak_ptr to base::Unretained(this), which allows tasks to be posted between threads.
2. Adding thread DCHECKs.
Review URL: https://codereview.chromium.org/81353002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@236702 0039d316-1c4b-4281-b951-d872f2087c98
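The change applies the same pattern in each file: tasks posted from the MAIN thread to an encoder or decoder thread can no longer bind weak_factory_.GetWeakPtr(), because a WeakPtr may only be dereferenced on the thread it was created on, so the cross-thread callbacks bind base::Unretained(this) instead (which assumes the object outlives every posted task), and each entry point now DCHECKs the thread it expects to run on. Below is a minimal sketch of that pattern; PacketHandler and DecodeOnDecoderThread() are hypothetical names invented for illustration, while CastEnvironment::PostTask/CurrentlyOn, base::Bind, and base::Unretained are the calls used in the patch.

// Sketch only; not code from this CL. PacketHandler and
// DecodeOnDecoderThread() are made-up names used to show the pattern.
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "media/cast/cast_environment.h"

namespace media {
namespace cast {

class PacketHandler {
 public:
  explicit PacketHandler(scoped_refptr<CastEnvironment> cast_environment)
      : cast_environment_(cast_environment) {}

  // Entry point expected to run on the MAIN thread.
  void OnPacket() {
    DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
    // A WeakPtr minted on MAIN could not be dereferenced on VIDEO_DECODER,
    // so the cross-thread task binds base::Unretained(this). This assumes
    // the PacketHandler stays alive until every posted task has run.
    cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
        base::Bind(&PacketHandler::DecodeOnDecoderThread,
                   base::Unretained(this)));
  }

 private:
  // Runs on the VIDEO_DECODER thread; the DCHECK documents and enforces it.
  void DecodeOnDecoderThread() {
    DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO_DECODER));
    // ... decode work ...
  }

  scoped_refptr<CastEnvironment> cast_environment_;
};

}  // namespace cast
}  // namespace media

The trade-off is that base::Unretained gives up the automatic invalidation a WeakPtr provides, so shutdown must guarantee the worker threads are drained before the object is destroyed.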
Diffstat (limited to 'media')
-rw-r--r--  media/cast/audio_receiver/audio_receiver.cc  |  4
-rw-r--r--  media/cast/audio_sender/audio_sender.cc      |  3
-rw-r--r--  media/cast/pacing/paced_sender.cc            |  7
-rw-r--r--  media/cast/video_receiver/video_receiver.cc  | 16
-rw-r--r--  media/cast/video_sender/video_encoder.cc     |  7
-rw-r--r--  media/cast/video_sender/video_sender.cc      |  2
6 files changed, 28 insertions, 11 deletions
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/audio_receiver/audio_receiver.cc
index b5b9acf..cea52df 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/audio_receiver/audio_receiver.cc
@@ -142,6 +142,7 @@ void AudioReceiver::InitializeTimers() {
 void AudioReceiver::IncomingParsedRtpPacket(const uint8* payload_data,
                                             size_t payload_size,
                                             const RtpCastHeader& rtp_header) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   cast_environment_->Logging()->InsertPacketEvent(kPacketReceived,
       rtp_header.webrtc.header.timestamp, rtp_header.frame_id,
       rtp_header.packet_id, rtp_header.max_packet_id, payload_size);
@@ -196,7 +197,7 @@ void AudioReceiver::GetRawAudioFrame(int number_of_10ms_blocks,
   cast_environment_->PostTask(CastEnvironment::AUDIO_DECODER, FROM_HERE,
       base::Bind(&AudioReceiver::DecodeAudioFrameThread,
-                 weak_factory_.GetWeakPtr(),
+                 base::Unretained(this),
                  number_of_10ms_blocks,
                  desired_frequency,
                  callback));
@@ -300,6 +301,7 @@ bool AudioReceiver::PostEncodedAudioFrame(
     scoped_ptr<EncodedAudioFrame>* encoded_frame) {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   DCHECK(audio_buffer_) << "Invalid function call in this configuration";
+
   base::TimeTicks now = cast_environment_->Clock()->NowTicks();
   base::TimeTicks playout_time = GetPlayoutTime(now, rtp_timestamp);
   base::TimeDelta time_until_playout = playout_time - now;
diff --git a/media/cast/audio_sender/audio_sender.cc b/media/cast/audio_sender/audio_sender.cc
index 66133f1..875b683 100644
--- a/media/cast/audio_sender/audio_sender.cc
+++ b/media/cast/audio_sender/audio_sender.cc
@@ -117,6 +117,9 @@ void AudioSender::InsertCodedAudioFrame(const EncodedAudioFrame* audio_frame,
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   DCHECK(audio_encoder_.get() == NULL) << "Invalid internal state";
 
+  cast_environment_->Logging()->InsertFrameEvent(kAudioFrameReceived,
+      GetVideoRtpTimestamp(recorded_time), kFrameIdUnknown);
+
   if (encryptor_) {
     EncodedAudioFrame encrypted_frame;
     if (!EncryptAudioFrame(*audio_frame, &encrypted_frame)) {
diff --git a/media/cast/pacing/paced_sender.cc b/media/cast/pacing/paced_sender.cc
index 4abda9b..cc53075 100644
--- a/media/cast/pacing/paced_sender.cc
+++ b/media/cast/pacing/paced_sender.cc
@@ -28,15 +28,18 @@ PacedSender::PacedSender(scoped_refptr<CastEnvironment> cast_environment,
 PacedSender::~PacedSender() {}
 
 bool PacedSender::SendPackets(const PacketList& packets) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   return SendPacketsToTransport(packets, &packet_list_);
 }
 
 bool PacedSender::ResendPackets(const PacketList& packets) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   return SendPacketsToTransport(packets, &resend_packet_list_);
 }
 
 bool PacedSender::SendPacketsToTransport(const PacketList& packets,
                                          PacketList* packets_not_sent) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   UpdateBurstSize(packets.size());
 
   if (!packets_not_sent->empty()) {
@@ -65,6 +68,7 @@ bool PacedSender::SendPacketsToTransport(const PacketList& packets,
 }
 
 bool PacedSender::SendRtcpPacket(const Packet& packet) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   // We pass the RTCP packets straight through.
   return transport_->SendPacket(packet);
 }
@@ -82,12 +86,14 @@ void PacedSender::ScheduleNextSend() {
 }
 
 void PacedSender::SendNextPacketBurst() {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   SendStoredPackets();
   time_last_process_ = cast_environment_->Clock()->NowTicks();
   ScheduleNextSend();
 }
 
 void PacedSender::SendStoredPackets() {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   if (packet_list_.empty() && resend_packet_list_.empty()) return;
   size_t packets_to_send = burst_size_;
@@ -123,6 +129,7 @@ void PacedSender::SendStoredPackets() {
 }
 
 void PacedSender::UpdateBurstSize(size_t packets_to_send) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   packets_to_send = std::max(packets_to_send,
       resend_packet_list_.size() + packet_list_.size());
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index c637d7a..19f98e8 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -164,7 +164,8 @@ void VideoReceiver::DecodeVideoFrame(
   // Hand the ownership of the encoded frame to the decode thread.
   cast_environment_->PostTask(CastEnvironment::VIDEO_DECODER, FROM_HERE,
       base::Bind(&VideoReceiver::DecodeVideoFrameThread,
-                 weak_factory_.GetWeakPtr(), base::Passed(&encoded_frame),
+                 base::Unretained(this),
+                 base::Passed(&encoded_frame),
                  render_time, callback));
 }
 
@@ -180,13 +181,14 @@ void VideoReceiver::DecodeVideoFrameThread(
                  frame_decoded_callback))) {
     // This will happen if we decide to decode but not show a frame.
     cast_environment_->PostTask(CastEnvironment::MAIN, FROM_HERE,
-        base::Bind(&VideoReceiver::GetRawVideoFrame,
-                   weak_factory_.GetWeakPtr(), frame_decoded_callback));
+        base::Bind(&VideoReceiver::GetRawVideoFrame, base::Unretained(this),
+                   frame_decoded_callback));
   }
 }
 
 bool VideoReceiver::DecryptVideoFrame(
     scoped_ptr<EncodedVideoFrame>* video_frame) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   DCHECK(decryptor_) << "Invalid state";
 
   if (!decryptor_->SetCounter(GetAesNonce((*video_frame)->frame_id,
@@ -196,7 +198,7 @@ bool VideoReceiver::DecryptVideoFrame(
   }
   std::string decrypted_video_data;
   if (!decryptor_->Decrypt((*video_frame)->data, &decrypted_video_data)) {
-    VLOG(0) << "Decryption error";
+    VLOG(1) << "Decryption error";
     // Give up on this frame, release it from jitter buffer.
     framer_->ReleaseFrame((*video_frame)->frame_id);
     return false;
@@ -241,7 +243,7 @@ void VideoReceiver::GetEncodedVideoFrame(
 // Should we pull the encoded video frame from the framer? decided by if this is
 // the next frame or we are running out of time and have to pull the following
 // frame.
-// If the frame it too old to be rendered we set the don't show flag in the
+// If the frame is too old to be rendered we set the don't show flag in the
 // video bitstream where possible.
 bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
                                           bool next_frame,
                                           scoped_ptr<EncodedVideoFrame>* encoded_frame,
@@ -268,7 +270,7 @@ bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
     cast_environment_->PostDelayedTask(CastEnvironment::MAIN, FROM_HERE,
         base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
         time_until_release);
-    VLOG(0) << "Wait before releasing frame "
+    VLOG(1) << "Wait before releasing frame "
             << static_cast<int>((*encoded_frame)->frame_id)
             << " time " << time_until_release.InMilliseconds();
     return false;
@@ -278,7 +280,7 @@ bool VideoReceiver::PullEncodedVideoFrame(uint32 rtp_timestamp,
       base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
   if (codec_ == kVp8 && time_until_render < dont_show_timeout_delta) {
     (*encoded_frame)->data[0] &= 0xef;
-    VLOG(0) << "Don't show frame "
+    VLOG(1) << "Don't show frame "
            << static_cast<int>((*encoded_frame)->frame_id)
            << " time_until_render:" << time_until_render.InMilliseconds();
   } else {
diff --git a/media/cast/video_sender/video_encoder.cc b/media/cast/video_sender/video_encoder.cc
index b55f442..b5aff70 100644
--- a/media/cast/video_sender/video_encoder.cc
+++ b/media/cast/video_sender/video_encoder.cc
@@ -46,9 +46,10 @@ bool VideoEncoder::EncodeVideoFrame(
   }
   cast_environment_->PostTask(CastEnvironment::VIDEO_ENCODER, FROM_HERE,
-      base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread, this,
-          video_frame, capture_time, dynamic_config_, frame_encoded_callback,
-          frame_release_callback));
+      base::Bind(&VideoEncoder::EncodeVideoFrameEncoderThread,
+          base::Unretained(this), video_frame, capture_time,
+          dynamic_config_, frame_encoded_callback,
+          frame_release_callback));
 
   dynamic_config_.key_frame_requested = false;
   return true;
diff --git a/media/cast/video_sender/video_sender.cc b/media/cast/video_sender/video_sender.cc
index eb560c6..b62e530 100644
--- a/media/cast/video_sender/video_sender.cc
+++ b/media/cast/video_sender/video_sender.cc
@@ -133,6 +133,8 @@ void VideoSender::InsertRawVideoFrame(
     const base::Closure& callback) {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   DCHECK(video_encoder_.get()) << "Invalid state";
+  cast_environment_->Logging()->InsertFrameEvent(kVideoFrameReceived,
+      GetVideoRtpTimestamp(capture_time), kFrameIdUnknown);
 
   if (!video_encoder_->EncodeVideoFrame(video_frame, capture_time,
       base::Bind(&VideoSender::SendEncodedVideoFrameMainThread,