summaryrefslogtreecommitdiffstats
path: root/media/cast/test
diff options
context:
space:
mode:
authorhclam <hclam@chromium.org>2014-11-22 06:34:25 -0800
committerCommit bot <commit-bot@chromium.org>2014-11-22 14:34:46 +0000
commit98b856c2dde992c3f12a0b412a8888db8801241c (patch)
tree9d74a45a972fa615cce07c090417fd638f943f7b /media/cast/test
parent0de567fefcf64abba751cae797e737c69dfe6f8f (diff)
downloadchromium_src-98b856c2dde992c3f12a0b412a8888db8801241c.zip
chromium_src-98b856c2dde992c3f12a0b412a8888db8801241c.tar.gz
chromium_src-98b856c2dde992c3f12a0b412a8888db8801241c.tar.bz2
Cast Streaming: Measure PSNR and SSIM in simulation
Add a couple features to cast_simulator: * Measures PSNR and SSIM from the decoded output. * Writes YUV decoded frames into a file. PSNR and SSIM can be used to measure the quality impact of encoder changes. Writing to YUV files help to perform manual inspection. BUG=None Review URL: https://codereview.chromium.org/737203002 Cr-Commit-Position: refs/heads/master@{#305362}
Diffstat (limited to 'media/cast/test')
-rw-r--r--media/cast/test/fake_media_source.cc17
-rw-r--r--media/cast/test/fake_media_source.h8
-rw-r--r--media/cast/test/loopback_transport.cc10
-rw-r--r--media/cast/test/loopback_transport.h4
-rw-r--r--media/cast/test/proto/network_simulation_model.proto5
-rw-r--r--media/cast/test/sender.cc2
-rw-r--r--media/cast/test/simulator.cc273
-rw-r--r--media/cast/test/utility/video_utility.cc22
-rw-r--r--media/cast/test/utility/video_utility.h4
9 files changed, 289 insertions, 56 deletions
diff --git a/media/cast/test/fake_media_source.cc b/media/cast/test/fake_media_source.cc
index d687ef3..fb30ea9 100644
--- a/media/cast/test/fake_media_source.cc
+++ b/media/cast/test/fake_media_source.cc
@@ -59,9 +59,11 @@ namespace cast {
FakeMediaSource::FakeMediaSource(
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
base::TickClock* clock,
- const VideoSenderConfig& video_config)
+ const VideoSenderConfig& video_config,
+ bool keep_frames)
: task_runner_(task_runner),
video_config_(video_config),
+ keep_frames_(keep_frames),
synthetic_count_(0),
clock_(clock),
audio_frame_count_(0),
@@ -245,6 +247,8 @@ void FakeMediaSource::SendNextFakeFrame() {
base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
video_frame->set_timestamp(video_time);
+ if (keep_frames_)
+ inserted_video_frame_queue_.push(video_frame);
video_frame_input_->InsertRawVideoFrame(video_frame,
start_time_ + video_time);
@@ -319,6 +323,8 @@ bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
// Use the timestamp from the file if we're transcoding.
video_frame->set_timestamp(ScaleTimestamp(decoded_frame->timestamp()));
+ if (keep_frames_)
+ inserted_video_frame_queue_.push(video_frame);
video_frame_input_->InsertRawVideoFrame(
video_frame, start_time_ + video_frame->timestamp());
@@ -576,6 +582,15 @@ void FakeMediaSource::ProvideData(int frame_delay,
}
}
+scoped_refptr<media::VideoFrame>
+FakeMediaSource::PopOldestInsertedVideoFrame() {
+ CHECK(!inserted_video_frame_queue_.empty());
+ scoped_refptr<media::VideoFrame> video_frame =
+ inserted_video_frame_queue_.front();
+ inserted_video_frame_queue_.pop();
+ return video_frame;
+}
+
AVStream* FakeMediaSource::av_audio_stream() {
return av_format_context_->streams[audio_stream_index_];
}
diff --git a/media/cast/test/fake_media_source.h b/media/cast/test/fake_media_source.h
index 4e6a4c3..f0822ce 100644
--- a/media/cast/test/fake_media_source.h
+++ b/media/cast/test/fake_media_source.h
@@ -46,9 +46,11 @@ class FakeMediaSource {
// |task_runner| is to schedule decoding tasks.
// |clock| is used by this source but is not owned.
// |video_config| is the desired video config.
+ // |keep_frames| is true if all VideoFrames are saved in a queue.
FakeMediaSource(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
base::TickClock* clock,
- const VideoSenderConfig& video_config);
+ const VideoSenderConfig& video_config,
+ bool keep_frames);
~FakeMediaSource();
// Transcode this file as the source of video and audio frames.
@@ -60,6 +62,8 @@ class FakeMediaSource {
const VideoSenderConfig& get_video_config() const { return video_config_; }
+ scoped_refptr<media::VideoFrame> PopOldestInsertedVideoFrame();
+
private:
bool is_transcoding_audio() const { return audio_stream_index_ >= 0; }
bool is_transcoding_video() const { return video_stream_index_ >= 0; }
@@ -99,6 +103,7 @@ class FakeMediaSource {
const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const VideoSenderConfig video_config_;
+ const bool keep_frames_;
scoped_refptr<AudioFrameInput> audio_frame_input_;
scoped_refptr<VideoFrameInput> video_frame_input_;
uint8 synthetic_count_;
@@ -135,6 +140,7 @@ class FakeMediaSource {
scoped_ptr<media::AudioTimestampHelper> audio_sent_ts_;
std::queue<scoped_refptr<VideoFrame> > video_frame_queue_;
+ std::queue<scoped_refptr<VideoFrame> > inserted_video_frame_queue_;
int64 video_first_pts_;
bool video_first_pts_set_;
base::TimeDelta last_video_frame_timestamp_;
diff --git a/media/cast/test/loopback_transport.cc b/media/cast/test/loopback_transport.cc
index 2b32fe33..45f708d 100644
--- a/media/cast/test/loopback_transport.cc
+++ b/media/cast/test/loopback_transport.cc
@@ -64,9 +64,13 @@ void LoopBackTransport::Initialize(
base::TickClock* clock) {
scoped_ptr<test::PacketPipe> loopback_pipe(
new LoopBackPacketPipe(packet_receiver));
- // Append the loopback pipe to the end.
- pipe->AppendToPipe(loopback_pipe.Pass());
- packet_pipe_ = pipe.Pass();
+ if (pipe) {
+ // Append the loopback pipe to the end.
+ pipe->AppendToPipe(loopback_pipe.Pass());
+ packet_pipe_ = pipe.Pass();
+ } else {
+ packet_pipe_ = loopback_pipe.Pass();
+ }
packet_pipe_->InitOnIOThread(task_runner, clock);
}
diff --git a/media/cast/test/loopback_transport.h b/media/cast/test/loopback_transport.h
index 2aca2f3..0b42560 100644
--- a/media/cast/test/loopback_transport.h
+++ b/media/cast/test/loopback_transport.h
@@ -35,8 +35,12 @@ class LoopBackTransport : public PacketSender {
  // Initialize this loopback transport.
// Establish a flow of packets from |pipe| to |packet_receiver|.
+ //
// The data flow looks like:
// SendPacket() -> |pipe| -> Fake loopback pipe -> |packet_receiver|.
+ //
+ // If |pipe| is NULL then the data flow looks like:
+ // SendPacket() -> Fake loopback pipe -> |packet_receiver|.
void Initialize(
scoped_ptr<test::PacketPipe> pipe,
const PacketReceiverCallback& packet_receiver,
diff --git a/media/cast/test/proto/network_simulation_model.proto b/media/cast/test/proto/network_simulation_model.proto
index 902712f..4785da3 100644
--- a/media/cast/test/proto/network_simulation_model.proto
+++ b/media/cast/test/proto/network_simulation_model.proto
@@ -16,7 +16,11 @@ message NetworkSimulationModel {
}
enum NetworkSimulationModelType {
+ // Network simulation based on interrupted poisson process.
INTERRUPTED_POISSON_PROCESS = 1;
+
+ // No network simulation.
+ NO_SIMULATION = 2;
}
message IPPModel {
@@ -24,4 +28,3 @@ message IPPModel {
optional double coef_variance = 2;
repeated double average_rate = 3;
}
-
diff --git a/media/cast/test/sender.cc b/media/cast/test/sender.cc
index 7ec1931..07efb294 100644
--- a/media/cast/test/sender.cc
+++ b/media/cast/test/sender.cc
@@ -303,7 +303,7 @@ int main(int argc, char** argv) {
scoped_ptr<media::cast::FakeMediaSource> fake_media_source(
new media::cast::FakeMediaSource(test_thread.message_loop_proxy(),
cast_environment->Clock(),
- video_config));
+ video_config, false));
int override_fps = 0;
if (!base::StringToInt(cmd->GetSwitchValueASCII(kSwitchFps),
diff --git a/media/cast/test/simulator.cc b/media/cast/test/simulator.cc
index 6108438..7122c8f 100644
--- a/media/cast/test/simulator.cc
+++ b/media/cast/test/simulator.cc
@@ -22,6 +22,13 @@
// --run-time=
// In seconds, how long the Cast session runs for.
// Optional; default is 180.
+// --metrics-output=
+// File path to write PSNR and SSIM metrics between source frames and
+// decoded frames. Assumes all encoded frames are decoded.
+// --yuv-output=
+// File path to write YUV decoded frames in YUV4MPEG2 format.
+// --no-simulation
+// Do not run network simulation.
//
// Output:
// - Raw event log of the simulation session tagged with the unique test ID,
@@ -38,6 +45,7 @@
#include "base/logging.h"
#include "base/path_service.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/tick_clock.h"
@@ -77,15 +85,18 @@ using media::cast::proto::NetworkSimulationModelType;
namespace media {
namespace cast {
namespace {
-const char kSourcePath[] = "source";
+const char kLibDir[] = "lib-dir";
const char kModelPath[] = "model";
+const char kMetricsOutputPath[] = "metrics-output";
const char kOutputPath[] = "output";
-const char kSimulationId[] = "sim-id";
-const char kLibDir[] = "lib-dir";
-const char kTargetDelay[] = "target-delay-ms";
const char kMaxFrameRate[] = "max-frame-rate";
-const char kSourceFrameRate[] = "source-frame-rate";
+const char kNoSimulation[] = "no-simulation";
const char kRunTime[] = "run-time";
+const char kSimulationId[] = "sim-id";
+const char kSourcePath[] = "source";
+const char kSourceFrameRate[] = "source-frame-rate";
+const char kTargetDelay[] = "target-delay-ms";
+const char kYuvOutputPath[] = "yuv-output";
int GetIntegerSwitchValue(const char* switch_name, int default_value) {
const std::string as_str =
@@ -149,15 +160,114 @@ void LogTransportEvents(const scoped_refptr<CastEnvironment>& env,
}
}
+// Maintains a queue of encoded video frames.
+// This works by tracking FRAME_CAPTURE_END and FRAME_ENCODED events.
+// If a video frame is detected to be encoded it transfers a frame
+// from FakeMediaSource to its internal queue. Otherwise it drops a
+// frame from FakeMediaSource.
+class EncodedVideoFrameTracker : public RawEventSubscriber {
+ public:
+ EncodedVideoFrameTracker(FakeMediaSource* media_source)
+ : media_source_(media_source),
+ last_frame_event_type_(UNKNOWN) {}
+ ~EncodedVideoFrameTracker() override {}
+
+ // RawEventSubscriber implementations.
+ void OnReceiveFrameEvent(const FrameEvent& frame_event) override {
+ // This method only cares about video FRAME_CAPTURE_END and
+ // FRAME_ENCODED events.
+ if (frame_event.media_type != VIDEO_EVENT) {
+ return;
+ }
+ if (frame_event.type != FRAME_CAPTURE_END &&
+ frame_event.type != FRAME_ENCODED) {
+ return;
+ }
+ // If there are two consecutive FRAME_CAPTURE_END events that means
+ // a frame is dropped.
+ if (last_frame_event_type_ == FRAME_CAPTURE_END &&
+ frame_event.type == FRAME_CAPTURE_END) {
+ media_source_->PopOldestInsertedVideoFrame();
+ }
+ if (frame_event.type == FRAME_ENCODED) {
+ video_frames_.push(media_source_->PopOldestInsertedVideoFrame());
+ }
+ last_frame_event_type_ = frame_event.type;
+ }
+
+ void OnReceivePacketEvent(const PacketEvent& packet_event) override {
+ // Don't care.
+ }
+
+ scoped_refptr<media::VideoFrame> PopOldestEncodedFrame() {
+ CHECK(!video_frames_.empty());
+ scoped_refptr<media::VideoFrame> video_frame = video_frames_.front();
+ video_frames_.pop();
+ return video_frame;
+ }
+
+ private:
+ FakeMediaSource* media_source_;
+ CastLoggingEvent last_frame_event_type_;
+ std::queue<scoped_refptr<media::VideoFrame> > video_frames_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodedVideoFrameTracker);
+};
+
+// Appends a YUV frame in I420 format to the file located at |path|.
+void AppendYuvToFile(const base::FilePath& path,
+ scoped_refptr<media::VideoFrame> frame) {
+ // Write YUV420 format to file.
+ std::string header = "FRAME\n";
+ AppendToFile(path, header.data(), header.size());
+ AppendToFile(path,
+ reinterpret_cast<char*>(frame->data(media::VideoFrame::kYPlane)),
+ frame->stride(media::VideoFrame::kYPlane) *
+ frame->rows(media::VideoFrame::kYPlane));
+ AppendToFile(path,
+ reinterpret_cast<char*>(frame->data(media::VideoFrame::kUPlane)),
+ frame->stride(media::VideoFrame::kUPlane) *
+ frame->rows(media::VideoFrame::kUPlane));
+ AppendToFile(path,
+ reinterpret_cast<char*>(frame->data(media::VideoFrame::kVPlane)),
+ frame->stride(media::VideoFrame::kVPlane) *
+ frame->rows(media::VideoFrame::kVPlane));
+}
+
+// A container to save output of GotVideoFrame() for computation based
+// on output frames.
+struct GotVideoFrameOutput {
+ GotVideoFrameOutput() : counter(0) {}
+ int counter;
+ std::vector<double> psnr;
+ std::vector<double> ssim;
+};
+
void GotVideoFrame(
- int* counter,
+ GotVideoFrameOutput* metrics_output,
+ const base::FilePath& yuv_output,
+ EncodedVideoFrameTracker* video_frame_tracker,
CastReceiver* cast_receiver,
const scoped_refptr<media::VideoFrame>& video_frame,
const base::TimeTicks& render_time,
bool continuous) {
- ++*counter;
+ ++metrics_output->counter;
cast_receiver->RequestDecodedVideoFrame(
- base::Bind(&GotVideoFrame, counter, cast_receiver));
+ base::Bind(&GotVideoFrame, metrics_output, yuv_output,
+ video_frame_tracker, cast_receiver));
+
+ // If |video_frame_tracker| is available that means we're computing
+  // quality metrics.
+ if (video_frame_tracker) {
+ scoped_refptr<media::VideoFrame> src_frame =
+ video_frame_tracker->PopOldestEncodedFrame();
+ metrics_output->psnr.push_back(I420PSNR(src_frame, video_frame));
+ metrics_output->ssim.push_back(I420SSIM(src_frame, video_frame));
+ }
+
+ if (!yuv_output.empty()) {
+ AppendYuvToFile(yuv_output, video_frame);
+ }
}
void GotAudioFrame(
@@ -204,10 +314,12 @@ void AppendLogToFile(media::cast::proto::LogMetadata* metadata,
// Run simulation once.
//
-// |output_path| is the path to write serialized log.
+// |log_output_path| is the path to write serialized log.
// |extra_data| is extra tagging information to write to log.
void RunSimulation(const base::FilePath& source_path,
- const base::FilePath& output_path,
+ const base::FilePath& log_output_path,
+ const base::FilePath& metrics_output_path,
+ const base::FilePath& yuv_output_path,
const std::string& extra_data,
const NetworkSimulationModel& model) {
// Fake clock. Make sure start time is non zero.
@@ -297,42 +409,63 @@ void RunSimulation(const base::FilePath& source_path,
scoped_ptr<CastSender> cast_sender(
CastSender::Create(sender_env, transport_sender.get()));
- // Build packet pipe.
- if (model.type() != media::cast::proto::INTERRUPTED_POISSON_PROCESS) {
- LOG(ERROR) << "Unknown model type " << model.type() << ".";
- return;
+ // Initialize network simulation model.
+ const bool use_network_simulation =
+ model.type() == media::cast::proto::INTERRUPTED_POISSON_PROCESS;
+ scoped_ptr<test::InterruptedPoissonProcess> ipp;
+ if (use_network_simulation) {
+ LOG(INFO) << "Running Poisson based network simulation.";
+ const IPPModel& ipp_model = model.ipp();
+ std::vector<double> average_rates(ipp_model.average_rate_size());
+ std::copy(ipp_model.average_rate().begin(),
+ ipp_model.average_rate().end(),
+ average_rates.begin());
+ ipp.reset(new test::InterruptedPoissonProcess(
+ average_rates,
+ ipp_model.coef_burstiness(), ipp_model.coef_variance(), 0));
+ receiver_to_sender.Initialize(
+ ipp->NewBuffer(128 * 1024).Pass(),
+ transport_sender->PacketReceiverForTesting(),
+ task_runner, &testing_clock);
+ sender_to_receiver.Initialize(
+ ipp->NewBuffer(128 * 1024).Pass(),
+ cast_receiver->packet_receiver(), task_runner,
+ &testing_clock);
+ } else {
+ LOG(INFO) << "No network simulation.";
+ receiver_to_sender.Initialize(
+ scoped_ptr<test::PacketPipe>(),
+ transport_sender->PacketReceiverForTesting(),
+ task_runner, &testing_clock);
+ sender_to_receiver.Initialize(
+ scoped_ptr<test::PacketPipe>(),
+ cast_receiver->packet_receiver(), task_runner,
+ &testing_clock);
}
- const IPPModel& ipp_model = model.ipp();
-
- std::vector<double> average_rates(ipp_model.average_rate_size());
- std::copy(ipp_model.average_rate().begin(), ipp_model.average_rate().end(),
- average_rates.begin());
- test::InterruptedPoissonProcess ipp(average_rates,
- ipp_model.coef_burstiness(), ipp_model.coef_variance(), 0);
+  // Initialize a fake media source and a tracker of encoded video frames.
+ const bool quality_test = !metrics_output_path.empty();
+ FakeMediaSource media_source(task_runner,
+ &testing_clock,
+ video_sender_config,
+ quality_test);
+ scoped_ptr<EncodedVideoFrameTracker> video_frame_tracker;
+ if (quality_test) {
+ video_frame_tracker.reset(new EncodedVideoFrameTracker(&media_source));
+ sender_env->Logging()->AddRawEventSubscriber(video_frame_tracker.get());
+ }
- // Connect sender to receiver. This initializes the pipe.
- receiver_to_sender.Initialize(
- ipp.NewBuffer(128 * 1024).Pass(),
- transport_sender->PacketReceiverForTesting(),
- task_runner, &testing_clock);
- sender_to_receiver.Initialize(
- ipp.NewBuffer(128 * 1024).Pass(),
- cast_receiver->packet_receiver(), task_runner,
- &testing_clock);
+ // Quality metrics computed for each frame decoded.
+ GotVideoFrameOutput metrics_output;
// Start receiver.
int audio_frame_count = 0;
- int video_frame_count = 0;
cast_receiver->RequestDecodedVideoFrame(
- base::Bind(&GotVideoFrame, &video_frame_count, cast_receiver.get()));
+ base::Bind(&GotVideoFrame, &metrics_output, yuv_output_path,
+ video_frame_tracker.get(), cast_receiver.get()));
cast_receiver->RequestDecodedAudioFrame(
base::Bind(&GotAudioFrame, &audio_frame_count, cast_receiver.get()));
- FakeMediaSource media_source(task_runner,
- &testing_clock,
- video_sender_config);
-
// Initializing audio and video senders.
cast_sender->InitializeAudio(audio_sender_config,
base::Bind(&AudioInitializationStatus));
@@ -342,6 +475,24 @@ void RunSimulation(const base::FilePath& source_path,
CreateDefaultVideoEncodeMemoryCallback());
task_runner->RunTasks();
+ // Truncate YUV files to prepare for writing.
+ if (!yuv_output_path.empty()) {
+ base::ScopedFILE file(base::OpenFile(yuv_output_path, "wb"));
+ if (!file.get()) {
+ LOG(ERROR) << "Cannot save YUV output to file.";
+ return;
+ }
+ LOG(INFO) << "Writing YUV output to file: " << yuv_output_path.value();
+
+ // Write YUV4MPEG2 header.
+ std::string header;
+ base::StringAppendF(
+ &header, "YUV4MPEG2 W%d H%d F30000:1001 Ip A1:1 C420\n",
+ media_source.get_video_config().width,
+ media_source.get_video_config().height);
+ AppendToFile(yuv_output_path, header.data(), header.size());
+ }
+
// Start sending.
if (!source_path.empty()) {
// 0 means using the FPS from the file.
@@ -351,7 +502,8 @@ void RunSimulation(const base::FilePath& source_path,
media_source.Start(cast_sender->audio_frame_input(),
cast_sender->video_frame_input());
- // Run for 3 minutes.
+ // By default runs simulation for 3 minutes or the desired duration
+ // by using --run-time= flag.
base::TimeDelta elapsed_time;
const base::TimeDelta desired_run_time =
base::TimeDelta::FromSeconds(GetIntegerSwitchValue(kRunTime, 180));
@@ -422,8 +574,9 @@ void RunSimulation(const base::FilePath& source_path,
LOG(INFO) << "Configured target playout delay (ms): "
<< video_receiver_config.rtp_max_delay_ms;
LOG(INFO) << "Audio frame count: " << audio_frame_count;
- LOG(INFO) << "Total video frames: " << total_video_frames;
- LOG(INFO) << "Dropped video frames " << dropped_video_frames;
+ LOG(INFO) << "Inserted video frames: " << total_video_frames;
+ LOG(INFO) << "Decoded video frames: " << metrics_output.counter;
+ LOG(INFO) << "Dropped video frames: " << dropped_video_frames;
LOG(INFO) << "Late video frames: " << late_video_frames
<< " (average lateness: "
<< (late_video_frames > 0 ?
@@ -433,20 +586,32 @@ void RunSimulation(const base::FilePath& source_path,
<< " ms)";
LOG(INFO) << "Average encoded bitrate (kbps): " << avg_encoded_bitrate;
LOG(INFO) << "Average target bitrate (kbps): " << avg_target_bitrate;
- LOG(INFO) << "Writing log: " << output_path.value();
+ LOG(INFO) << "Writing log: " << log_output_path.value();
// Truncate file and then write serialized log.
{
- base::ScopedFILE file(base::OpenFile(output_path, "wb"));
+ base::ScopedFILE file(base::OpenFile(log_output_path, "wb"));
if (!file.get()) {
LOG(INFO) << "Cannot write to log.";
return;
}
}
AppendLogToFile(&video_metadata, video_frame_events, video_packet_events,
- output_path);
+ log_output_path);
AppendLogToFile(&audio_metadata, audio_frame_events, audio_packet_events,
- output_path);
+ log_output_path);
+
+ // Write quality metrics.
+ if (quality_test) {
+ LOG(INFO) << "Writing quality metrics: " << metrics_output_path.value();
+ std::string line;
+ for (size_t i = 0; i < metrics_output.psnr.size() &&
+ i < metrics_output.ssim.size(); ++i) {
+ base::StringAppendF(&line, "%f %f\n", metrics_output.psnr[i],
+ metrics_output.ssim[i]);
+ }
+ WriteFile(metrics_output_path, line.data(), line.length());
+ }
}
NetworkSimulationModel DefaultModel() {
@@ -498,8 +663,13 @@ bool IsModelValid(const NetworkSimulationModel& model) {
}
NetworkSimulationModel LoadModel(const base::FilePath& model_path) {
+ if (CommandLine::ForCurrentProcess()->HasSwitch(kNoSimulation)) {
+ NetworkSimulationModel model;
+ model.set_type(media::cast::proto::NO_SIMULATION);
+ return model;
+ }
if (model_path.empty()) {
- LOG(ERROR) << "Model path not set.";
+ LOG(ERROR) << "Model path not set; Using default model.";
return DefaultModel();
}
std::string model_str;
@@ -546,12 +716,16 @@ int main(int argc, char** argv) {
base::FilePath source_path = cmd->GetSwitchValuePath(
media::cast::kSourcePath);
- base::FilePath output_path = cmd->GetSwitchValuePath(
+ base::FilePath log_output_path = cmd->GetSwitchValuePath(
media::cast::kOutputPath);
- if (output_path.empty()) {
- base::GetTempDir(&output_path);
- output_path = output_path.AppendASCII("sim-events.gz");
+ if (log_output_path.empty()) {
+ base::GetTempDir(&log_output_path);
+ log_output_path = log_output_path.AppendASCII("sim-events.gz");
}
+ base::FilePath metrics_output_path = cmd->GetSwitchValuePath(
+ media::cast::kMetricsOutputPath);
+ base::FilePath yuv_output_path = cmd->GetSwitchValuePath(
+ media::cast::kYuvOutputPath);
std::string sim_id = cmd->GetSwitchValueASCII(media::cast::kSimulationId);
NetworkSimulationModel model = media::cast::LoadModel(
@@ -565,6 +739,7 @@ int main(int argc, char** argv) {
base::JSONWriter::Write(&values, &extra_data);
// Run.
- media::cast::RunSimulation(source_path, output_path, extra_data, model);
+ media::cast::RunSimulation(source_path, log_output_path, metrics_output_path,
+ yuv_output_path, extra_data, model);
return 0;
}
diff --git a/media/cast/test/utility/video_utility.cc b/media/cast/test/utility/video_utility.cc
index b94c99c..ed72c89 100644
--- a/media/cast/test/utility/video_utility.cc
+++ b/media/cast/test/utility/video_utility.cc
@@ -36,6 +36,28 @@ double I420PSNR(const scoped_refptr<media::VideoFrame>& frame1,
frame1->coded_size().height());
}
+double I420SSIM(const scoped_refptr<media::VideoFrame>& frame1,
+ const scoped_refptr<media::VideoFrame>& frame2) {
+ if (frame1->coded_size().width() != frame2->coded_size().width() ||
+ frame1->coded_size().height() != frame2->coded_size().height())
+ return -1;
+
+ return libyuv::I420Ssim(frame1->data(VideoFrame::kYPlane),
+ frame1->stride(VideoFrame::kYPlane),
+ frame1->data(VideoFrame::kUPlane),
+ frame1->stride(VideoFrame::kUPlane),
+ frame1->data(VideoFrame::kVPlane),
+ frame1->stride(VideoFrame::kVPlane),
+ frame2->data(VideoFrame::kYPlane),
+ frame2->stride(VideoFrame::kYPlane),
+ frame2->data(VideoFrame::kUPlane),
+ frame2->stride(VideoFrame::kUPlane),
+ frame2->data(VideoFrame::kVPlane),
+ frame2->stride(VideoFrame::kVPlane),
+ frame1->coded_size().width(),
+ frame1->coded_size().height());
+}
+
void PopulateVideoFrame(VideoFrame* frame, int start_value) {
int height = frame->coded_size().height();
int stride_y = frame->stride(VideoFrame::kYPlane);
diff --git a/media/cast/test/utility/video_utility.h b/media/cast/test/utility/video_utility.h
index bbb9865..32bd5c0 100644
--- a/media/cast/test/utility/video_utility.h
+++ b/media/cast/test/utility/video_utility.h
@@ -13,6 +13,10 @@ namespace cast {
double I420PSNR(const scoped_refptr<media::VideoFrame>& frame1,
const scoped_refptr<media::VideoFrame>& frame2);
+// Compute and return SSIM between two frames.
+double I420SSIM(const scoped_refptr<media::VideoFrame>& frame1,
+ const scoped_refptr<media::VideoFrame>& frame2);
+
// Populate a video frame with values starting with the given start value.
// Width, height and stride should be set in advance.
// Memory is allocated within the function.