summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorjyw <jyw@chromium.org>2015-11-24 19:20:55 -0800
committerCommit bot <commit-bot@chromium.org>2015-11-25 03:22:57 +0000
commitad33ab7a20ddede811d0562c45a267a1b06c1ed6 (patch)
tree4177ffbb62e6a60174ff5761e7d444ad1aab184a
parent5f8b6bcfd0170eb47ddb48206fd6181c9d01c7b9 (diff)
downloadchromium_src-ad33ab7a20ddede811d0562c45a267a1b06c1ed6.zip
chromium_src-ad33ab7a20ddede811d0562c45a267a1b06c1ed6.tar.gz
chromium_src-ad33ab7a20ddede811d0562c45a267a1b06c1ed6.tar.bz2
Enable pcm_s32le audio decoding.
To accommodate this change, a new kSampleFormatS24 has been introduced. It is only used with S24LE PCM input. FFmpeg treats pcm_s24le as an encoded format that can be decoded into s32le samples; kSampleFormatS24 only exists on the Chrome side to distinguish FFmpeg's pcm_s24le from pcm_s32le, both of which use Chrome's catch-all kCodecPCM codec and the same real sample format. BUG=557170,internal b/22378279 TEST=media_unittests --gtest_filter=AudioBufferTest.* media_unittests --gtest_filter=PipelineIntegrationTest.S32PlaybackHashed Review URL: https://codereview.chromium.org/1422113002 Cr-Commit-Position: refs/heads/master@{#361545}
-rw-r--r--DEPS2
-rw-r--r--chromecast/media/cma/base/decoder_config_adapter.cc4
-rw-r--r--chromecast/public/media/decoder_config.h3
-rw-r--r--media/base/audio_buffer.cc2
-rw-r--r--media/base/audio_buffer_unittest.cc1
-rw-r--r--media/base/sample_format.cc3
-rw-r--r--media/base/sample_format.h3
-rw-r--r--media/cast/test/fake_media_source.cc20
-rw-r--r--media/ffmpeg/ffmpeg_common.cc19
-rw-r--r--media/ffmpeg/ffmpeg_common.h2
-rw-r--r--media/ffmpeg/ffmpeg_common_unittest.cc27
-rw-r--r--media/filters/ffmpeg_audio_decoder.cc3
-rw-r--r--media/mojo/interfaces/media_types.mojom3
-rw-r--r--media/test/data/sfx_s32le.wavbin0 -> 50996 bytes
-rw-r--r--media/test/pipeline_integration_test.cc8
15 files changed, 67 insertions, 33 deletions
diff --git a/DEPS b/DEPS
index ad41411..1a4785a 100644
--- a/DEPS
+++ b/DEPS
@@ -187,7 +187,7 @@ deps = {
Var('chromium_git') + '/webm/libvpx.git' + '@' + '204cde580a5f6dd5e7511c932c47c068046d9671',
'src/third_party/ffmpeg':
- Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + 'a3f3bd78577a16bb5ec49c9d4f4ebd039efc9123',
+ Var('chromium_git') + '/chromium/third_party/ffmpeg.git' + '@' + 'f962bcb6a8aed397bb2d418527a4e4a23f5ab955',
'src/third_party/libjingle/source/talk':
Var('chromium_git') + '/external/webrtc/trunk/talk.git' + '@' + '5a1f5b5548e30780dcb5211bcdf8ce4f4a937838', # commit position 10776
diff --git a/chromecast/media/cma/base/decoder_config_adapter.cc b/chromecast/media/cma/base/decoder_config_adapter.cc
index 0bcf49c..fa8ac20 100644
--- a/chromecast/media/cma/base/decoder_config_adapter.cc
+++ b/chromecast/media/cma/base/decoder_config_adapter.cc
@@ -44,6 +44,8 @@ SampleFormat ToSampleFormat(const ::media::SampleFormat sample_format) {
return kSampleFormatU8;
case ::media::kSampleFormatS16:
return kSampleFormatS16;
+ case ::media::kSampleFormatS24:
+ return kSampleFormatS24;
case ::media::kSampleFormatS32:
return kSampleFormatS32;
case ::media::kSampleFormatF32:
@@ -131,6 +133,8 @@ VideoProfile ToVideoProfile(const ::media::VideoCodecProfile codec_profile) {
return ::media::kSampleFormatU8;
case kSampleFormatS16:
return ::media::kSampleFormatS16;
+ case kSampleFormatS24:
+ return ::media::kSampleFormatS24;
case kSampleFormatS32:
return ::media::kSampleFormatS32;
case kSampleFormatF32:
diff --git a/chromecast/public/media/decoder_config.h b/chromecast/public/media/decoder_config.h
index cf84654..e2cf78d 100644
--- a/chromecast/public/media/decoder_config.h
+++ b/chromecast/public/media/decoder_config.h
@@ -45,9 +45,10 @@ enum SampleFormat {
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
kSampleFormatPlanarS32, // Signed 32-bit planar.
+ kSampleFormatS24, // Signed 24-bit.
kSampleFormatMin = kUnknownSampleFormat,
- kSampleFormatMax = kSampleFormatPlanarS32,
+ kSampleFormatMax = kSampleFormatS24,
};
enum VideoCodec {
diff --git a/media/base/audio_buffer.cc b/media/base/audio_buffer.cc
index a497640..168d371 100644
--- a/media/base/audio_buffer.cc
+++ b/media/base/audio_buffer.cc
@@ -315,6 +315,7 @@ void ReadFramesInterleaved(const std::vector<uint8*>& channel_data,
InterleaveAndConvert<int16, Dest>(
channel_data, frames_to_copy * channel_count, trim_start, dest_data);
break;
+ case kSampleFormatS24:
case kSampleFormatS32:
InterleaveAndConvert<int32, Dest>(
channel_data, frames_to_copy * channel_count, trim_start, dest_data);
@@ -402,6 +403,7 @@ void AudioBuffer::TrimRange(int start, int end) {
break;
case kSampleFormatU8:
case kSampleFormatS16:
+ case kSampleFormatS24:
case kSampleFormatS32:
case kSampleFormatF32: {
// Interleaved data can be shifted all at once.
diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc
index b48220d..fdb89d3 100644
--- a/media/base/audio_buffer_unittest.cc
+++ b/media/base/audio_buffer_unittest.cc
@@ -463,6 +463,7 @@ static scoped_refptr<AudioBuffer> MakeReadFramesInterleavedTestBuffer(
1,
frames,
base::TimeDelta::FromSeconds(0));
+ case kSampleFormatS24:
case kSampleFormatS32:
return MakeAudioBuffer<int32>(kSampleFormatS32,
channel_layout,
diff --git a/media/base/sample_format.cc b/media/base/sample_format.cc
index dd0bc1d..de91283 100644
--- a/media/base/sample_format.cc
+++ b/media/base/sample_format.cc
@@ -17,6 +17,7 @@ int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
case kSampleFormatS16:
case kSampleFormatPlanarS16:
return 2;
+ case kSampleFormatS24:
case kSampleFormatS32:
case kSampleFormatF32:
case kSampleFormatPlanarF32:
@@ -36,6 +37,8 @@ const char* SampleFormatToString(SampleFormat sample_format) {
return "Unsigned 8-bit with bias of 128";
case kSampleFormatS16:
return "Signed 16-bit";
+ case kSampleFormatS24:
+ return "Signed 24-bit";
case kSampleFormatS32:
return "Signed 32-bit";
case kSampleFormatF32:
diff --git a/media/base/sample_format.h b/media/base/sample_format.h
index 0260f169..53d0cb9 100644
--- a/media/base/sample_format.h
+++ b/media/base/sample_format.h
@@ -22,9 +22,10 @@ enum SampleFormat {
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
kSampleFormatPlanarS32, // Signed 32-bit planar.
+ kSampleFormatS24, // Signed 24-bit.
// Must always be equal to largest value ever logged.
- kSampleFormatMax = kSampleFormatPlanarS32,
+ kSampleFormatMax = kSampleFormatS24,
};
// Returns the number of bytes used per channel for the specified
diff --git a/media/cast/test/fake_media_source.cc b/media/cast/test/fake_media_source.cc
index b8820b7..24327d0 100644
--- a/media/cast/test/fake_media_source.cc
+++ b/media/cast/test/fake_media_source.cc
@@ -470,18 +470,14 @@ void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
audio_sent_ts_->SetBaseTimestamp(base_ts);
}
- scoped_refptr<AudioBuffer> buffer =
- AudioBuffer::CopyFrom(
- AVSampleFormatToSampleFormat(
- av_audio_context()->sample_fmt),
- ChannelLayoutToChromeChannelLayout(
- av_audio_context()->channel_layout,
- av_audio_context()->channels),
- av_audio_context()->channels,
- av_audio_context()->sample_rate,
- frames_read,
- &avframe->data[0],
- PtsToTimeDelta(avframe->pkt_pts, av_audio_stream()->time_base));
+ scoped_refptr<AudioBuffer> buffer = AudioBuffer::CopyFrom(
+ AVSampleFormatToSampleFormat(av_audio_context()->sample_fmt,
+ av_audio_context()->codec_id),
+ ChannelLayoutToChromeChannelLayout(av_audio_context()->channel_layout,
+ av_audio_context()->channels),
+ av_audio_context()->channels, av_audio_context()->sample_rate,
+ frames_read, &avframe->data[0],
+ PtsToTimeDelta(avframe->pkt_pts, av_audio_stream()->time_base));
audio_algo_.EnqueueBuffer(buffer);
av_frame_unref(avframe);
} while (packet_temp.size > 0);
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index c9d8669..772b41d 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -74,6 +74,7 @@ static AudioCodec CodecIDToAudioCodec(AVCodecID codec_id) {
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_PCM_S16LE:
case AV_CODEC_ID_PCM_S24LE:
+ case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_F32LE:
return kCodecPCM;
case AV_CODEC_ID_PCM_S16BE:
@@ -117,8 +118,10 @@ static AVCodecID AudioCodecToCodecID(AudioCodec audio_codec,
return AV_CODEC_ID_PCM_U8;
case kSampleFormatS16:
return AV_CODEC_ID_PCM_S16LE;
- case kSampleFormatS32:
+ case kSampleFormatS24:
return AV_CODEC_ID_PCM_S24LE;
+ case kSampleFormatS32:
+ return AV_CODEC_ID_PCM_S32LE;
case kSampleFormatF32:
return AV_CODEC_ID_PCM_F32LE;
default:
@@ -244,14 +247,18 @@ static int VideoCodecProfileToProfileID(VideoCodecProfile profile) {
return FF_PROFILE_UNKNOWN;
}
-SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format) {
+SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format,
+ AVCodecID codec_id) {
switch (sample_format) {
case AV_SAMPLE_FMT_U8:
return kSampleFormatU8;
case AV_SAMPLE_FMT_S16:
return kSampleFormatS16;
case AV_SAMPLE_FMT_S32:
- return kSampleFormatS32;
+ if (codec_id == AV_CODEC_ID_PCM_S24LE)
+ return kSampleFormatS24;
+ else
+ return kSampleFormatS32;
case AV_SAMPLE_FMT_FLT:
return kSampleFormatF32;
case AV_SAMPLE_FMT_S16P:
@@ -272,6 +279,8 @@ static AVSampleFormat SampleFormatToAVSampleFormat(SampleFormat sample_format) {
return AV_SAMPLE_FMT_U8;
case kSampleFormatS16:
return AV_SAMPLE_FMT_S16;
+ // pcm_s24le is treated as a codec with sample format s32 in ffmpeg
+ case kSampleFormatS24:
case kSampleFormatS32:
return AV_SAMPLE_FMT_S32;
case kSampleFormatF32:
@@ -293,8 +302,8 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
AudioCodec codec = CodecIDToAudioCodec(codec_context->codec_id);
- SampleFormat sample_format =
- AVSampleFormatToSampleFormat(codec_context->sample_fmt);
+ SampleFormat sample_format = AVSampleFormatToSampleFormat(
+ codec_context->sample_fmt, codec_context->codec_id);
ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout(
codec_context->channel_layout, codec_context->channels);
diff --git a/media/ffmpeg/ffmpeg_common.h b/media/ffmpeg/ffmpeg_common.h
index 7938a32..955ee64 100644
--- a/media/ffmpeg/ffmpeg_common.h
+++ b/media/ffmpeg/ffmpeg_common.h
@@ -123,7 +123,7 @@ MEDIA_EXPORT AVCodecID VideoCodecToCodecID(VideoCodec video_codec);
// Converts FFmpeg's audio sample format to Chrome's SampleFormat.
MEDIA_EXPORT SampleFormat
-AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
+AVSampleFormatToSampleFormat(AVSampleFormat sample_format, AVCodecID codec_id);
// Converts FFmpeg's pixel formats to its corresponding supported video format.
MEDIA_EXPORT VideoPixelFormat
diff --git a/media/ffmpeg/ffmpeg_common_unittest.cc b/media/ffmpeg/ffmpeg_common_unittest.cc
index aa8a06d..9a73587 100644
--- a/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -161,17 +161,24 @@ TEST_F(FFmpegCommonTest, VerifyFormatSizes) {
for (AVSampleFormat format = AV_SAMPLE_FMT_NONE;
format < AV_SAMPLE_FMT_NB;
format = static_cast<AVSampleFormat>(format + 1)) {
- SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
- if (sample_format == kUnknownSampleFormat) {
- // This format not supported, so skip it.
- continue;
+ std::vector<AVCodecID> codec_ids(1, AV_CODEC_ID_NONE);
+ if (format == AV_SAMPLE_FMT_S32)
+ codec_ids.push_back(AV_CODEC_ID_PCM_S24LE);
+ for (const auto& codec_id : codec_ids) {
+ SampleFormat sample_format =
+ AVSampleFormatToSampleFormat(format, codec_id);
+ if (sample_format == kUnknownSampleFormat) {
+ // This format not supported, so skip it.
+ continue;
+ }
+
+ // Have FFMpeg compute the size of a buffer of 1 channel / 1 frame
+ // with 1 byte alignment to make sure the sizes match.
+ int single_buffer_size =
+ av_samples_get_buffer_size(NULL, 1, 1, format, 1);
+ int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
+ EXPECT_EQ(bytes_per_channel, single_buffer_size);
}
-
- // Have FFMpeg compute the size of a buffer of 1 channel / 1 frame
- // with 1 byte alignment to make sure the sizes match.
- int single_buffer_size = av_samples_get_buffer_size(NULL, 1, 1, format, 1);
- int bytes_per_channel = SampleFormatToBytesPerChannel(sample_format);
- EXPECT_EQ(bytes_per_channel, single_buffer_size);
}
}
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 206f971..2ad55cd 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -58,7 +58,8 @@ static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) {
// data, use the values supplied by FFmpeg (ignoring the current settings).
// FFmpegDecode() gets to determine if the buffer is useable or not.
AVSampleFormat format = static_cast<AVSampleFormat>(frame->format);
- SampleFormat sample_format = AVSampleFormatToSampleFormat(format);
+ SampleFormat sample_format =
+ AVSampleFormatToSampleFormat(format, s->codec_id);
int channels = DetermineChannels(frame);
if (channels <= 0 || channels >= limits::kMaxChannels) {
DLOG(ERROR) << "Requested number of channels (" << channels
diff --git a/media/mojo/interfaces/media_types.mojom b/media/mojo/interfaces/media_types.mojom
index 78784f9..befd80a 100644
--- a/media/mojo/interfaces/media_types.mojom
+++ b/media/mojo/interfaces/media_types.mojom
@@ -86,7 +86,8 @@ enum SampleFormat {
PlanarS16,
PlanarF32,
PlanarS32,
- Max = PlanarS32,
+ S24,
+ Max = S24,
};
// See media/base/video_types.h for descriptions.
diff --git a/media/test/data/sfx_s32le.wav b/media/test/data/sfx_s32le.wav
new file mode 100644
index 0000000..5f670a9
--- /dev/null
+++ b/media/test/data/sfx_s32le.wav
Binary files differ
diff --git a/media/test/pipeline_integration_test.cc b/media/test/pipeline_integration_test.cc
index b19b32c..69da770 100644
--- a/media/test/pipeline_integration_test.cc
+++ b/media/test/pipeline_integration_test.cc
@@ -909,6 +909,14 @@ TEST_F(PipelineIntegrationTest, BasicPlaybackLive) {
demuxer_->GetTimelineOffset());
}
+TEST_F(PipelineIntegrationTest, S32PlaybackHashed) {
+ ASSERT_EQ(PIPELINE_OK, Start("sfx_s32le.wav", kHashed));
+ Play();
+ ASSERT_TRUE(WaitUntilOnEnded());
+ EXPECT_HASH_EQ(std::string(kNullVideoHash), GetVideoHash());
+ EXPECT_HASH_EQ("3.03,2.86,2.99,3.31,3.57,4.06,", GetAudioHash());
+}
+
TEST_F(PipelineIntegrationTest, F32PlaybackHashed) {
ASSERT_EQ(PIPELINE_OK, Start("sfx_f32le.wav", kHashed));
Play();