summary refs log tree commit diff stats
path: root/media
diff options
context:
space:
mode:
authormiu <miu@chromium.org>2015-05-19 17:31:35 -0700
committerCommit bot <commit-bot@chromium.org>2015-05-20 00:31:37 +0000
commit1cb99158245ac2a36ea0ee5e20fd2fff71c864d1 (patch)
tree3b30842836776bc11d678ec0991305243b6a888e /media
parent324a169646a722c4be1e222b3bb053c46a65956e (diff)
downloadchromium_src-1cb99158245ac2a36ea0ee5e20fd2fff71c864d1.zip
chromium_src-1cb99158245ac2a36ea0ee5e20fd2fff71c864d1.tar.gz
chromium_src-1cb99158245ac2a36ea0ee5e20fd2fff71c864d1.tar.bz2
New FRAME_DURATION VideoFrameMetadata, with Cast Streaming use case.
Adds a new FRAME_DURATION option to VideoFrameMetadata, which can be used by consumers of video frames to improve performance (e.g., encoding quality). This change also adds population of the new metadata by the desktop/tab capture pipeline, and consumption by Cast Streaming's software VP8 encoder. Having accurate frame duration information improves the encoder's ability to choose a compression quality level that better meets the target encode bitrate. Later changes will require this in order to compute resource utilization feedback signals (see bug for details).

BUG=156767

Review URL: https://codereview.chromium.org/1146723002

Cr-Commit-Position: refs/heads/master@{#330661}
Diffstat (limited to 'media')
-rw-r--r--media/base/video_frame_metadata.cc44
-rw-r--r--media/base/video_frame_metadata.h15
-rw-r--r--media/base/video_frame_unittest.cc8
-rw-r--r--media/cast/sender/vp8_encoder.cc22
4 files changed, 70 insertions, 19 deletions
diff --git a/media/base/video_frame_metadata.cc b/media/base/video_frame_metadata.cc
index d14bbe9..d663612 100644
--- a/media/base/video_frame_metadata.cc
+++ b/media/base/video_frame_metadata.cc
@@ -47,14 +47,27 @@ void VideoFrameMetadata::SetString(Key key, const std::string& value) {
base::BinaryValue::CreateWithCopiedBuffer(value.data(), value.size()));
}
-void VideoFrameMetadata::SetTimeTicks(Key key, const base::TimeTicks& value) {
+namespace {
+template<class TimeType>
+void SetTimeValue(VideoFrameMetadata::Key key,
+ const TimeType& value,
+ base::DictionaryValue* dictionary) {
const int64 internal_value = value.ToInternalValue();
- dictionary_.SetWithoutPathExpansion(
+ dictionary->SetWithoutPathExpansion(
ToInternalKey(key),
base::BinaryValue::CreateWithCopiedBuffer(
reinterpret_cast<const char*>(&internal_value),
sizeof(internal_value)));
}
+} // namespace
+
+void VideoFrameMetadata::SetTimeDelta(Key key, const base::TimeDelta& value) {
+ SetTimeValue(key, value, &dictionary_);
+}
+
+void VideoFrameMetadata::SetTimeTicks(Key key, const base::TimeTicks& value) {
+ SetTimeValue(key, value, &dictionary_);
+}
void VideoFrameMetadata::SetValue(Key key, scoped_ptr<base::Value> value) {
dictionary_.SetWithoutPathExpansion(ToInternalKey(key), value.Pass());
@@ -83,16 +96,27 @@ bool VideoFrameMetadata::GetString(Key key, std::string* value) const {
return !!binary_value;
}
-bool VideoFrameMetadata::GetTimeTicks(Key key, base::TimeTicks* value) const {
+namespace {
+template<class TimeType>
+bool ToTimeValue(const base::BinaryValue& binary_value, TimeType* value) {
DCHECK(value);
+ int64 internal_value;
+ if (binary_value.GetSize() != sizeof(internal_value))
+ return false;
+ memcpy(&internal_value, binary_value.GetBuffer(), sizeof(internal_value));
+ *value = TimeType::FromInternalValue(internal_value);
+ return true;
+}
+} // namespace
+
+bool VideoFrameMetadata::GetTimeDelta(Key key, base::TimeDelta* value) const {
const base::BinaryValue* const binary_value = GetBinaryValue(key);
- if (binary_value && binary_value->GetSize() == sizeof(int64)) {
- int64 internal_value;
- memcpy(&internal_value, binary_value->GetBuffer(), sizeof(internal_value));
- *value = base::TimeTicks::FromInternalValue(internal_value);
- return true;
- }
- return false;
+ return binary_value && ToTimeValue(*binary_value, value);
+}
+
+bool VideoFrameMetadata::GetTimeTicks(Key key, base::TimeTicks* value) const {
+ const base::BinaryValue* const binary_value = GetBinaryValue(key);
+ return binary_value && ToTimeValue(*binary_value, value);
}
const base::Value* VideoFrameMetadata::GetValue(Key key) const {
diff --git a/media/base/video_frame_metadata.h b/media/base/video_frame_metadata.h
index 31fbe74..de3045a 100644
--- a/media/base/video_frame_metadata.h
+++ b/media/base/video_frame_metadata.h
@@ -21,8 +21,19 @@ class MEDIA_EXPORT VideoFrameMetadata {
CAPTURE_BEGIN_TIME,
CAPTURE_END_TIME,
+ // The estimated duration of this frame (i.e., the amount of time between
+ // the media timestamp of this frame and the next). Note that this is not
+ // the same information provided by FRAME_RATE as the FRAME_DURATION can
+ // vary unpredictably for every frame. Consumers can use this to optimize
+ // playback scheduling, make encoding quality decisions, and/or compute
+ // frame-level resource utilization stats. Use Get/SetTimeDelta() for this
+ // key.
+ FRAME_DURATION,
+
// Represents either the fixed frame rate, or the maximum frame rate to
- // expect from a variable-rate source. Use Get/SetDouble() for this key.
+ // expect from a variable-rate source. This value generally remains the
+ // same for all frames in the same session. Use Get/SetDouble() for this
+ // key.
FRAME_RATE,
NUM_KEYS
@@ -40,6 +51,7 @@ class MEDIA_EXPORT VideoFrameMetadata {
void SetInteger(Key key, int value);
void SetDouble(Key key, double value);
void SetString(Key key, const std::string& value);
+ void SetTimeDelta(Key key, const base::TimeDelta& value);
void SetTimeTicks(Key key, const base::TimeTicks& value);
void SetValue(Key key, scoped_ptr<base::Value> value);
@@ -48,6 +60,7 @@ class MEDIA_EXPORT VideoFrameMetadata {
bool GetInteger(Key key, int* value) const WARN_UNUSED_RESULT;
bool GetDouble(Key key, double* value) const WARN_UNUSED_RESULT;
bool GetString(Key key, std::string* value) const WARN_UNUSED_RESULT;
+ bool GetTimeDelta(Key key, base::TimeDelta* value) const WARN_UNUSED_RESULT;
bool GetTimeTicks(Key key, base::TimeTicks* value) const WARN_UNUSED_RESULT;
// Returns null if |key| was not present.
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index e22a818..d3d56d7 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -379,6 +379,14 @@ TEST(VideoFrameMetadata, SetAndThenGetAllKeysForAllTypes) {
metadata.Clear();
EXPECT_FALSE(metadata.HasKey(key));
+ metadata.SetTimeDelta(key, base::TimeDelta::FromInternalValue(42 + i));
+ EXPECT_TRUE(metadata.HasKey(key));
+ base::TimeDelta delta_value;
+ EXPECT_TRUE(metadata.GetTimeDelta(key, &delta_value));
+ EXPECT_EQ(base::TimeDelta::FromInternalValue(42 + i), delta_value);
+ metadata.Clear();
+
+ EXPECT_FALSE(metadata.HasKey(key));
metadata.SetTimeTicks(key, base::TimeTicks::FromInternalValue(~(0LL) + i));
EXPECT_TRUE(metadata.HasKey(key));
base::TimeTicks ticks_value;
diff --git a/media/cast/sender/vp8_encoder.cc b/media/cast/sender/vp8_encoder.cc
index 4d397b6..b68935c 100644
--- a/media/cast/sender/vp8_encoder.cc
+++ b/media/cast/sender/vp8_encoder.cc
@@ -215,20 +215,26 @@ void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
// The frame duration given to the VP8 codec affects a number of important
// behaviors, including: per-frame bandwidth, CPU time spent encoding,
// temporal quality trade-offs, and key/golden/alt-ref frame generation
- // intervals. Use the actual amount of time between the current and previous
- // frames as a prediction for the next frame's duration, but bound the
- // prediction to account for the fact that the frame rate can be highly
- // variable, including long pauses in the video stream.
+ // intervals. Bound the prediction to account for the fact that the frame
+ // rate can be highly variable, including long pauses in the video stream.
const base::TimeDelta minimum_frame_duration =
base::TimeDelta::FromSecondsD(1.0 / cast_config_.max_frame_rate);
const base::TimeDelta maximum_frame_duration =
base::TimeDelta::FromSecondsD(static_cast<double>(kRestartFramePeriods) /
cast_config_.max_frame_rate);
- const base::TimeDelta last_frame_duration =
- video_frame->timestamp() - last_frame_timestamp_;
- const base::TimeDelta predicted_frame_duration =
+ base::TimeDelta predicted_frame_duration;
+ if (!video_frame->metadata()->GetTimeDelta(
+ media::VideoFrameMetadata::FRAME_DURATION,
+ &predicted_frame_duration) ||
+ predicted_frame_duration <= base::TimeDelta()) {
+ // The source of the video frame did not provide the frame duration. Use
+ // the actual amount of time between the current and previous frame as a
+ // prediction for the next frame's duration.
+ predicted_frame_duration = video_frame->timestamp() - last_frame_timestamp_;
+ }
+ predicted_frame_duration =
std::max(minimum_frame_duration,
- std::min(maximum_frame_duration, last_frame_duration));
+ std::min(maximum_frame_duration, predicted_frame_duration));
last_frame_timestamp_ = video_frame->timestamp();
// Encode the frame. The presentation time stamp argument here is fixed to