1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
|
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Standalone benchmarking application based on FFmpeg. This tool is used to
// measure decoding performance between different FFmpeg compile and run-time
// options. We also use this tool to measure performance regressions when
// testing newer builds of FFmpeg from trunk.
//
// This tool requires FFmpeg DLLs built with --enable-protocol=file.
#include <iomanip>
#include <iostream>
#include <string>
#include "base/at_exit.h"
#include "base/basictypes.h"
#include "base/command_line.h"
#include "base/file_path.h"
#include "base/file_util.h"
#include "base/logging.h"
#include "base/string_util.h"
#include "base/time.h"
#include "media/base/media.h"
#include "media/bench/file_protocol.h"
#include "media/filters/ffmpeg_common.h"
#include "media/filters/ffmpeg_video_decoder.h"
// Command-line switch names accepted by this tool.
namespace switches {
// --stream=[audio|video]: which elementary stream to benchmark.
const wchar_t kStream[] = L"stream";
// --video-threads=N: number of decode threads for video streams.
const wchar_t kVideoThreads[] = L"video-threads";
// --fast2: sets CODEC_FLAG2_FAST on the codec context (allows
// non-spec-compliant speedups).
const wchar_t kFast2[] = L"fast2";
// --skip=[1|2|3]: loop-filter / frame skipping level (see main()).
const wchar_t kSkip[] = L"skip";
// --flush: feed an empty packet after EOF to drain delayed frames.
const wchar_t kFlush[] = L"flush";
}  // namespace switches
// Entry point: parses options, opens the media file with FFmpeg, decodes the
// selected stream as fast as possible while timing each decode call, then
// prints frame count, total/summed time, mean, and standard deviation.
// Returns 0 on success, 1 on any usage or decode error.
int main(int argc, const char** argv) {
  // Required by Chromium's base library so at-exit callbacks run on return.
  base::AtExitManager exit_manager;
  CommandLine::Init(argc, argv);
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  // Loose (non-switch) args: input file, and an optional dump file that
  // receives the raw decoded output (PCM samples or planar YUV).
  std::vector<std::wstring> filenames(cmd_line->GetLooseValues());
  if (filenames.empty()) {
    std::cerr << "Usage: media_bench [OPTIONS] FILE [DUMPFILE]\n"
              << " --stream=[audio|video] "
              << "Benchmark either the audio or video stream\n"
              << " --video-threads=N "
              << "Decode video using N threads\n"
              << " --fast2 "
              << "Enable fast2 flag\n"
              << " --flush "
              << "Flush last frame\n"
              << " --skip=[1|2|3] "
              << "1=loop nonref, 2=loop, 3= frame nonref\n" << std::endl;
    return 1;
  }
  // Initialize our media library (try loading DLLs, etc.) before continuing.
  // We use an empty file path as the parameter to force searching of the
  // default locations for necessary DLLs and DSOs.
  if (media::InitializeMediaLibrary(FilePath()) == false) {
    std::cerr << "Unable to initialize the media library.";
    return 1;
  }
  // Retrieve command line options.
  std::string in_path(WideToUTF8(filenames[0]));
  std::string out_path;
  if (filenames.size() > 1) {
    out_path = WideToUTF8(filenames[1]);
  }
  // CODEC_TYPE_UNKNOWN doubles as "no --stream given"; in that case no stream
  // will match below and the tool exits after listing the streams.
  CodecType target_codec = CODEC_TYPE_UNKNOWN;
  int video_threads = 0;
  // Determine whether to benchmark audio or video decoding.
  std::wstring stream(cmd_line->GetSwitchValue(switches::kStream));
  if (!stream.empty()) {
    if (stream.compare(L"audio") == 0) {
      target_codec = CODEC_TYPE_AUDIO;
    } else if (stream.compare(L"video") == 0) {
      target_codec = CODEC_TYPE_VIDEO;
    } else {
      std::cerr << "Unknown --stream option " << stream << std::endl;
      return 1;
    }
  }
  // Determine number of threads to use for video decoding (optional).
  // A non-numeric value silently falls back to 0 (no threading).
  std::wstring threads(cmd_line->GetSwitchValue(switches::kVideoThreads));
  if (!threads.empty() &&
      !StringToInt(WideToUTF16Hack(threads), &video_threads)) {
    video_threads = 0;
  }
  bool fast2 = false;
  if (cmd_line->HasSwitch(switches::kFast2)) {
    fast2 = true;
  }
  bool flush = false;
  if (cmd_line->HasSwitch(switches::kFlush)) {
    flush = true;
  }
  // Skip level: 0 = none (default, also on parse failure), see mapping to
  // AVDISCARD_* values below.
  int skip = 0;
  if (cmd_line->HasSwitch(switches::kSkip)) {
    std::wstring skip_opt(cmd_line->GetSwitchValue(switches::kSkip));
    if (!StringToInt(WideToUTF16Hack(skip_opt), &skip)) {
      skip = 0;
    }
  }
  // Register FFmpeg and attempt to open file.  Registration must happen
  // before any open/decode call; kFFmpegFileProtocol provides file I/O
  // (FFmpeg must be built with --enable-protocol=file).
  avcodec_init();
  av_register_all();
  av_register_protocol(&kFFmpegFileProtocol);
  AVFormatContext* format_context = NULL;
  if (av_open_input_file(&format_context, in_path.c_str(), NULL, 0, NULL) < 0) {
    std::cerr << "Could not open " << in_path << std::endl;
    return 1;
  }
  // Open output file.  NULL means "benchmark only, don't dump".
  FILE *output = NULL;
  if (!out_path.empty()) {
    output = file_util::OpenFile(out_path.c_str(), "wb");
    if (!output) {
      LOG(ERROR) << "could not open output";
      return 1;
    }
  }
  // Parse a little bit of the stream to fill out the format context.
  if (av_find_stream_info(format_context) < 0) {
    std::cerr << "Could not find stream info for " << in_path << std::endl;
    return 1;
  }
  // Find our target stream: the first stream whose codec type matches
  // --stream.  All streams are listed; the chosen one is marked with '*'.
  int target_stream = -1;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecContext* codec_context = format_context->streams[i]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
    // See if we found our target codec.
    if (codec_context->codec_type == target_codec && target_stream < 0) {
      std::cout << "* ";
      target_stream = i;
    } else {
      std::cout << " ";
    }
    if (codec_context->codec_type == CODEC_TYPE_UNKNOWN) {
      std::cout << "Stream #" << i << ": Unknown" << std::endl;
    } else {
      // Print out stream information
      std::cout << "Stream #" << i << ": " << codec->name << " ("
                << codec->long_name << ")" << std::endl;
    }
  }
  // Only continue if we found our target stream.
  if (target_stream < 0) {
    return 1;
  }
  // Prepare FFmpeg structures.
  AVPacket packet;
  AVCodecContext* codec_context = format_context->streams[target_stream]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_context->codec_id);
  // Map --skip levels to FFmpeg discard settings (trades quality for speed):
  //   1: skip the loop filter for non-reference frames only.
  //   2: skip the loop filter entirely.
  //   3: skip the loop filter entirely AND skip decoding non-ref frames.
  if (skip == 1) {
    codec_context->skip_loop_filter = AVDISCARD_NONREF;
  } else if (skip == 2) {
    codec_context->skip_loop_filter = AVDISCARD_ALL;
  } else if (skip == 3) {
    codec_context->skip_loop_filter = AVDISCARD_ALL;
    codec_context->skip_frame = AVDISCARD_NONREF;
  }
  if (fast2) {
    codec_context->flags2 |= CODEC_FLAG2_FAST;
  }
  // Initialize threaded decode.  Failure is non-fatal; decoding proceeds
  // single-threaded.
  if (target_codec == CODEC_TYPE_VIDEO && video_threads > 0) {
    if (avcodec_thread_init(codec_context, video_threads) < 0) {
      std::cerr << "WARNING: Could not initialize threading!\n"
                << "Did you build with pthread/w32thread support?" << std::endl;
    }
  }
  // Initialize our codec.
  if (avcodec_open(codec_context, codec) < 0) {
    std::cerr << "Could not open codec " << codec_context->codec->name
              << std::endl;
    return 1;
  }
  // Buffer used for audio decoding.  NOTE(review): the av_malloc result is
  // not NULL-checked and is never av_free'd; the process exits right after,
  // so the leak is harmless here.
  int16* samples =
      reinterpret_cast<int16*>(av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE));
  // Buffer used for video decoding.
  AVFrame* frame = avcodec_alloc_frame();
  if (!frame) {
    std::cerr << "Could not allocate an AVFrame" << std::endl;
    return 1;
  }
  // Stats collector.  One entry per decode call on the target stream
  // (including calls that produce no output frame).
  std::vector<double> decode_times;
  decode_times.reserve(4096);
  // Parse through the entire stream until we hit EOF.
  base::TimeTicks start = base::TimeTicks::HighResNow();
  size_t frames = 0;
  int read_result = 0;
  do {
    read_result = av_read_frame(format_context, &packet);
    if (read_result < 0) {
      // At EOF: if --flush was given, feed an empty packet to the decoder to
      // drain any internally buffered (delayed) frames; otherwise stop.
      if (flush) {
        packet.stream_index = target_stream;
        packet.size = 0;
      } else {
        break;
      }
    }
    // Only decode packets from our target stream.
    if (packet.stream_index == target_stream) {
      int result = -1;
      base::TimeTicks decode_start = base::TimeTicks::HighResNow();
      if (target_codec == CODEC_TYPE_AUDIO) {
        // size_out is in/out: in = buffer capacity, out = bytes decoded.
        int size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE;
        result = avcodec_decode_audio3(codec_context, samples, &size_out,
                                       &packet);
        if (size_out) {
          ++frames;
          // When flushing past EOF, read_result is negative even though we
          // still got a frame; reset it so the loop keeps draining.
          read_result = 0;  // Force continuation.
          if (output) {
            if (fwrite(samples, 1, size_out, output) !=
                static_cast<size_t>(size_out)) {
              std::cerr << "could not write data after " << size_out;
              return 1;
            }
          }
        }
      } else if (target_codec == CODEC_TYPE_VIDEO) {
        int got_picture = 0;
        result = avcodec_decode_video2(codec_context, frame, &got_picture,
                                       &packet);
        if (got_picture) {
          ++frames;
          read_result = 0;  // Force continuation.
          // TODO(fbarchard): support formats other than YV12.
          if (output) {
            // Dump each of the three planes (Y, U, V).  linesize may exceed
            // the visible width, so copy row by row at the visible width.
            for (int plane = 0; plane < 3; ++plane) {
              const uint8* source = frame->data[plane];
              const size_t source_stride = frame->linesize[plane];
              size_t bytes_per_line = codec_context->width;
              size_t copy_lines = codec_context->height;
              if (plane != 0) {
                // Adjust chroma plane dimensions by subsampling factor.
                switch (codec_context->pix_fmt) {
                  case PIX_FMT_YUV420P:
                  case PIX_FMT_YUVJ420P:
                    // 4:2:0 chroma: half width, half height (rounded up).
                    bytes_per_line /= 2;
                    copy_lines = (copy_lines + 1) / 2;
                    break;
                  case PIX_FMT_YUV422P:
                  case PIX_FMT_YUVJ422P:
                    // 4:2:2 chroma: half width, full height.  The
                    // self-assignment is a deliberate no-op kept for symmetry.
                    bytes_per_line /= 2;
                    copy_lines = copy_lines;
                    break;
                  case PIX_FMT_YUV444P:
                  case PIX_FMT_YUVJ444P:
                    // 4:4:4 chroma: full width, full height (no-op).
                    copy_lines = copy_lines;
                    break;
                  default:
                    std::cerr << "unknown video format: "
                              << codec_context->pix_fmt;
                    return 1;
                }
              }
              for (size_t i = 0; i < copy_lines; ++i) {
                if (fwrite(source, 1, bytes_per_line, output) !=
                    bytes_per_line) {
                  std::cerr << "could not write data after "
                            << bytes_per_line;
                  return 1;
                }
                source += source_stride;
              }
            }
          }
        }
      } else {
        // target_codec is validated above, so this branch is unreachable.
        NOTREACHED();
      }
      base::TimeDelta delta = base::TimeTicks::HighResNow() - decode_start;
      decode_times.push_back(delta.InMillisecondsF());
      // Make sure our decoding went OK.
      if (result < 0) {
        std::cerr << "Error while decoding" << std::endl;
        return 1;
      }
    }
    // Free our packet.
    av_free_packet(&packet);
  } while (read_result >= 0);
  base::TimeDelta total = base::TimeTicks::HighResNow() - start;
  if (output)
    file_util::CloseFile(output);
  // Calculate the sum of times. Note that some of these may be zero.
  double sum = 0;
  for (size_t i = 0; i < decode_times.size(); ++i) {
    sum += decode_times[i];
  }
  // Print our results.
  std::cout.setf(std::ios::fixed);
  std::cout.precision(2);
  std::cout << std::endl;
  std::cout << " Frames:" << std::setw(10) << frames
            << std::endl;
  std::cout << " Total:" << std::setw(10) << total.InMillisecondsF()
            << " ms" << std::endl;
  std::cout << " Summation:" << std::setw(10) << sum
            << " ms" << std::endl;
  if (frames > 0u) {
    // Calculate the average time per frame.  Note: the average uses the
    // frame count, while decode_times has one entry per decode call, so
    // calls that produced no frame still contribute to the sum.
    double average = sum / frames;
    // Calculate the sum of the squared differences.
    // Standard deviation will only be accurate if no threads are used.
    // TODO(fbarchard): Rethink standard deviation calculation.
    double squared_sum = 0;
    for (size_t i = 0; i < frames; ++i) {
      double difference = decode_times[i] - average;
      squared_sum += difference * difference;
    }
    // Calculate the standard deviation (jitter).
    double stddev = sqrt(squared_sum / frames);
    std::cout << " Average:" << std::setw(10) << average
              << " ms" << std::endl;
    std::cout << " StdDev:" << std::setw(10) << stddev
              << " ms" << std::endl;
  }
  return 0;
}
|