diff options
Diffstat (limited to 'content/browser/speech')
33 files changed, 266 insertions, 335 deletions
diff --git a/content/browser/speech/audio_buffer.cc b/content/browser/speech/audio_buffer.cc index ef49004..3e7d2a4 100644 --- a/content/browser/speech/audio_buffer.cc +++ b/content/browser/speech/audio_buffer.cc @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "content/browser/speech/audio_buffer.h" + #include "base/logging.h" #include "base/stl_util.h" -#include "content/browser/speech/audio_buffer.h" -namespace speech { +namespace content { AudioChunk::AudioChunk(int bytes_per_sample) : bytes_per_sample_(bytes_per_sample) { @@ -87,4 +88,4 @@ bool AudioBuffer::IsEmpty() const { return chunks_.empty(); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/audio_buffer.h b/content/browser/speech/audio_buffer.h index e50708bf..783ae66 100644 --- a/content/browser/speech/audio_buffer.h +++ b/content/browser/speech/audio_buffer.h @@ -12,7 +12,7 @@ #include "base/memory/ref_counted.h" #include "content/common/content_export.h" -namespace speech { +namespace content { // Models a chunk derived from an AudioBuffer. 
class CONTENT_EXPORT AudioChunk : @@ -71,6 +71,6 @@ class AudioBuffer { DISALLOW_COPY_AND_ASSIGN(AudioBuffer); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_AUDIO_BUFFER_H_ diff --git a/content/browser/speech/audio_encoder.cc b/content/browser/speech/audio_encoder.cc index b98b94f..8eade5a 100644 --- a/content/browser/speech/audio_encoder.cc +++ b/content/browser/speech/audio_encoder.cc @@ -13,9 +13,7 @@ #include "third_party/flac/flac.h" #include "third_party/speex/speex.h" -using std::string; -using speech::AudioChunk; - +namespace content { namespace { //-------------------------------- FLACEncoder --------------------------------- @@ -23,7 +21,7 @@ namespace { const char* const kContentTypeFLAC = "audio/x-flac; rate="; const int kFLACCompressionLevel = 0; // 0 for speed -class FLACEncoder : public speech::AudioEncoder { +class FLACEncoder : public AudioEncoder { public: FLACEncoder(int sampling_rate, int bits_per_sample); virtual ~FLACEncoder(); @@ -112,7 +110,7 @@ const int kMaxSpeexFrameLength = 110; // (44kbps rate sampled at 32kHz). // make sure it is within the byte range. 
COMPILE_ASSERT(kMaxSpeexFrameLength <= 0xFF, invalidLength); -class SpeexEncoder : public speech::AudioEncoder { +class SpeexEncoder : public AudioEncoder { public: explicit SpeexEncoder(int sampling_rate, int bits_per_sample); virtual ~SpeexEncoder(); @@ -172,8 +170,6 @@ void SpeexEncoder::Encode(const AudioChunk& raw_audio) { } // namespace -namespace speech { - AudioEncoder* AudioEncoder::Create(Codec codec, int sampling_rate, int bits_per_sample) { @@ -195,4 +191,4 @@ scoped_refptr<AudioChunk> AudioEncoder::GetEncodedDataAndClear() { return encoded_audio_buffer_.DequeueAll(); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/audio_encoder.h b/content/browser/speech/audio_encoder.h index 43bfa1fe..581eafd 100644 --- a/content/browser/speech/audio_encoder.h +++ b/content/browser/speech/audio_encoder.h @@ -12,8 +12,9 @@ #include "base/memory/ref_counted.h" #include "content/browser/speech/audio_buffer.h" -namespace speech { +namespace content { class AudioChunk; + // Provides a simple interface to encode raw audio using the various speech // codecs. 
class AudioEncoder { @@ -54,6 +55,6 @@ class AudioEncoder { DISALLOW_COPY_AND_ASSIGN(AudioEncoder); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_AUDIO_ENCODER_H_ diff --git a/content/browser/speech/chunked_byte_buffer.cc b/content/browser/speech/chunked_byte_buffer.cc index 23a6e64..ae8a6ce 100644 --- a/content/browser/speech/chunked_byte_buffer.cc +++ b/content/browser/speech/chunked_byte_buffer.cc @@ -26,7 +26,7 @@ uint32 ReadBigEndian32(const uint8* buffer) { } // namespace -namespace speech { +namespace content { ChunkedByteBuffer::ChunkedByteBuffer() : partial_chunk_(new Chunk()), @@ -133,4 +133,4 @@ size_t ChunkedByteBuffer::Chunk::ExpectedContentLength() const { return static_cast<size_t>(ReadBigEndian32(&header[0])); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/chunked_byte_buffer.h b/content/browser/speech/chunked_byte_buffer.h index 1cacf79..8b9d237 100644 --- a/content/browser/speech/chunked_byte_buffer.h +++ b/content/browser/speech/chunked_byte_buffer.h @@ -13,7 +13,7 @@ #include "base/memory/scoped_vector.h" #include "content/common/content_export.h" -namespace speech { +namespace content { // Models a chunk-oriented byte buffer. 
The term chunk is herein defined as an // arbitrary sequence of bytes that is preceeded by N header bytes, indicating @@ -70,6 +70,6 @@ class CONTENT_EXPORT ChunkedByteBuffer { }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_CHUNKED_BYTE_BUFFER_H_ diff --git a/content/browser/speech/chunked_byte_buffer_unittest.cc b/content/browser/speech/chunked_byte_buffer_unittest.cc index 1633101..57fdf4c 100644 --- a/content/browser/speech/chunked_byte_buffer_unittest.cc +++ b/content/browser/speech/chunked_byte_buffer_unittest.cc @@ -8,7 +8,7 @@ #include "content/browser/speech/chunked_byte_buffer.h" #include "testing/gtest/include/gtest/gtest.h" -namespace speech { +namespace content { typedef std::vector<uint8> ByteVector; @@ -73,4 +73,4 @@ TEST(ChunkedByteBufferTest, BasicTest) { EXPECT_FALSE(buffer.HasChunks()); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/endpointer/endpointer.cc b/content/browser/speech/endpointer/endpointer.cc index b4a54c1..b5e8f0b 100644 --- a/content/browser/speech/endpointer/endpointer.cc +++ b/content/browser/speech/endpointer/endpointer.cc @@ -2,18 +2,18 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "content/browser/speech/audio_buffer.h" #include "content/browser/speech/endpointer/endpointer.h" #include "base/time.h" +#include "content/browser/speech/audio_buffer.h" using base::Time; namespace { -static const int kFrameRate = 50; // 1 frame = 20ms of audio. +const int kFrameRate = 50; // 1 frame = 20ms of audio. 
} -namespace speech { +namespace content { Endpointer::Endpointer(int sample_rate) : speech_input_possibly_complete_silence_length_us_(-1), @@ -166,4 +166,4 @@ EpStatus Endpointer::ProcessAudio(const AudioChunk& raw_audio, float* rms_out) { return ep_status; } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/endpointer/endpointer.h b/content/browser/speech/endpointer/endpointer.h index 89ec3a9..6688ee6 100644 --- a/content/browser/speech/endpointer/endpointer.h +++ b/content/browser/speech/endpointer/endpointer.h @@ -11,7 +11,7 @@ class EpStatus; -namespace speech { +namespace content { class AudioChunk; @@ -148,6 +148,6 @@ class CONTENT_EXPORT Endpointer { int32 frame_size_; }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_ENDPOINTER_ENDPOINTER_H_ diff --git a/content/browser/speech/endpointer/endpointer_unittest.cc b/content/browser/speech/endpointer/endpointer_unittest.cc index bdf8cc4..6bfe903 100644 --- a/content/browser/speech/endpointer/endpointer_unittest.cc +++ b/content/browser/speech/endpointer/endpointer_unittest.cc @@ -16,7 +16,7 @@ const int kFrameSize = kSampleRate / kFrameRate; // 160 samples. COMPILE_ASSERT(kFrameSize == 160, invalid_frame_size); } -namespace speech { +namespace content { class FrameProcessor { public: @@ -145,4 +145,4 @@ TEST(EndpointerTest, TestEmbeddedEndpointerEvents) { endpointer.EndSession(); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/endpointer/energy_endpointer.cc b/content/browser/speech/endpointer/energy_endpointer.cc index b88d7a3..d8d1274 100644 --- a/content/browser/speech/endpointer/energy_endpointer.cc +++ b/content/browser/speech/endpointer/energy_endpointer.cc @@ -41,7 +41,7 @@ float GetDecibel(float value) { } // namespace -namespace speech { +namespace content { // Stores threshold-crossing histories for making decisions about the speech // state. 
@@ -373,4 +373,4 @@ EpStatus EnergyEndpointer::Status(int64* status_time) const { return status_; } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/endpointer/energy_endpointer.h b/content/browser/speech/endpointer/energy_endpointer.h index 9db927a..0aa421f 100644 --- a/content/browser/speech/endpointer/energy_endpointer.h +++ b/content/browser/speech/endpointer/energy_endpointer.h @@ -44,7 +44,7 @@ #include "content/browser/speech/endpointer/energy_endpointer_params.h" #include "content/common/content_export.h" -namespace speech { +namespace content { // Endpointer status codes enum EpStatus { @@ -150,6 +150,6 @@ class CONTENT_EXPORT EnergyEndpointer { DISALLOW_COPY_AND_ASSIGN(EnergyEndpointer); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_ENDPOINTER_ENERGY_ENDPOINTER_H_ diff --git a/content/browser/speech/endpointer/energy_endpointer_params.cc b/content/browser/speech/endpointer/energy_endpointer_params.cc index 89852f1..9cdf024 100644 --- a/content/browser/speech/endpointer/energy_endpointer_params.cc +++ b/content/browser/speech/endpointer/energy_endpointer_params.cc @@ -4,7 +4,7 @@ #include "content/browser/speech/endpointer/energy_endpointer_params.h" -namespace speech { +namespace content { EnergyEndpointerParams::EnergyEndpointerParams() { SetDefaults(); @@ -50,4 +50,4 @@ void EnergyEndpointerParams::operator=(const EnergyEndpointerParams& source) { contamination_rejection_period_ = source.contamination_rejection_period(); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/endpointer/energy_endpointer_params.h b/content/browser/speech/endpointer/energy_endpointer_params.h index 9641d9d..436adce 100644 --- a/content/browser/speech/endpointer/energy_endpointer_params.h +++ b/content/browser/speech/endpointer/energy_endpointer_params.h @@ -8,7 +8,7 @@ #include "base/basictypes.h" #include "content/common/content_export.h" -namespace speech { +namespace 
content { // Input parameters for the EnergyEndpointer class. class CONTENT_EXPORT EnergyEndpointerParams { @@ -133,6 +133,6 @@ class CONTENT_EXPORT EnergyEndpointerParams { float contamination_rejection_period_; }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_ENDPOINTER_ENERGY_ENDPOINTER_PARAMS_H_ diff --git a/content/browser/speech/google_one_shot_remote_engine.cc b/content/browser/speech/google_one_shot_remote_engine.cc index 8180844..d713dfd 100644 --- a/content/browser/speech/google_one_shot_remote_engine.cc +++ b/content/browser/speech/google_one_shot_remote_engine.cc @@ -21,10 +21,7 @@ #include "net/url_request/url_request_context_getter.h" #include "net/url_request/url_request_status.h" -using content::SpeechRecognitionError; -using content::SpeechRecognitionHypothesis; -using content::SpeechRecognitionResult; - +namespace content { namespace { const char* const kDefaultSpeechRecognitionUrl = @@ -36,8 +33,7 @@ const char* const kConfidenceString = "confidence"; const int kWebServiceStatusNoError = 0; const int kWebServiceStatusNoSpeech = 4; const int kWebServiceStatusNoMatch = 5; -const speech::AudioEncoder::Codec kDefaultAudioCodec = - speech::AudioEncoder::CODEC_FLAC; +const AudioEncoder::Codec kDefaultAudioCodec = AudioEncoder::CODEC_FLAC; bool ParseServerResponse(const std::string& response_body, SpeechRecognitionResult* result, @@ -78,13 +74,13 @@ bool ParseServerResponse(const std::string& response_body, case kWebServiceStatusNoError: break; case kWebServiceStatusNoSpeech: - error->code = content::SPEECH_RECOGNITION_ERROR_NO_SPEECH; + error->code = SPEECH_RECOGNITION_ERROR_NO_SPEECH; return false; case kWebServiceStatusNoMatch: - error->code = content::SPEECH_RECOGNITION_ERROR_NO_MATCH; + error->code = SPEECH_RECOGNITION_ERROR_NO_MATCH; return false; default: - error->code = content::SPEECH_RECOGNITION_ERROR_NETWORK; + error->code = SPEECH_RECOGNITION_ERROR_NETWORK; // Other status codes should not be returned by the 
server. VLOG(1) << "ParseServerResponse: unexpected status code " << status; return false; @@ -148,8 +144,6 @@ bool ParseServerResponse(const std::string& response_body, } // namespace -namespace speech { - const int GoogleOneShotRemoteEngine::kAudioPacketIntervalMs = 100; int GoogleOneShotRemoteEngine::url_fetcher_id_for_tests = 0; @@ -266,7 +260,7 @@ void GoogleOneShotRemoteEngine::OnURLFetchComplete( const net::URLFetcher* source) { DCHECK_EQ(url_fetcher_.get(), source); SpeechRecognitionResult result; - SpeechRecognitionError error(content::SPEECH_RECOGNITION_ERROR_NETWORK); + SpeechRecognitionError error(SPEECH_RECOGNITION_ERROR_NETWORK); std::string data; // The default error code in case of parse errors is NETWORK_FAILURE, however @@ -293,4 +287,4 @@ int GoogleOneShotRemoteEngine::GetDesiredAudioChunkDurationMs() const { return kAudioPacketIntervalMs; } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/google_one_shot_remote_engine.h b/content/browser/speech/google_one_shot_remote_engine.h index 34942b3..442aa1a 100644 --- a/content/browser/speech/google_one_shot_remote_engine.h +++ b/content/browser/speech/google_one_shot_remote_engine.h @@ -16,18 +16,11 @@ #include "googleurl/src/gurl.h" #include "net/url_request/url_fetcher_delegate.h" -namespace content { -struct SpeechRecognitionResult; -class URLFetcher; -} - namespace net { class URLRequestContextGetter; } -namespace speech { - -class AudioChunk; +namespace content { // Implements a SpeechRecognitionEngine by means of remote interaction with // Google speech recognition webservice. 
@@ -64,6 +57,6 @@ class CONTENT_EXPORT GoogleOneShotRemoteEngine DISALLOW_COPY_AND_ASSIGN(GoogleOneShotRemoteEngine); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_GOOGLE_ONE_SHOT_REMOTE_ENGINE_H_ diff --git a/content/browser/speech/google_one_shot_remote_engine_unittest.cc b/content/browser/speech/google_one_shot_remote_engine_unittest.cc index 2ffae29..7142eeb 100644 --- a/content/browser/speech/google_one_shot_remote_engine_unittest.cc +++ b/content/browser/speech/google_one_shot_remote_engine_unittest.cc @@ -13,14 +13,13 @@ #include "net/url_request/url_request_status.h" #include "testing/gtest/include/gtest/gtest.h" -namespace speech { +namespace content { -class GoogleOneShotRemoteEngineTest - : public SpeechRecognitionEngineDelegate, - public testing::Test { +class GoogleOneShotRemoteEngineTest : public SpeechRecognitionEngineDelegate, + public testing::Test { public: GoogleOneShotRemoteEngineTest() - : error_(content::SPEECH_RECOGNITION_ERROR_NONE) {} + : error_(SPEECH_RECOGNITION_ERROR_NONE) {} // Creates a speech recognition request and invokes its URL fetcher delegate // with the given test data. @@ -28,20 +27,20 @@ class GoogleOneShotRemoteEngineTest // SpeechRecognitionRequestDelegate methods. 
virtual void OnSpeechRecognitionEngineResult( - const content::SpeechRecognitionResult& result) OVERRIDE { + const SpeechRecognitionResult& result) OVERRIDE { result_ = result; } virtual void OnSpeechRecognitionEngineError( - const content::SpeechRecognitionError& error) OVERRIDE { + const SpeechRecognitionError& error) OVERRIDE { error_ = error.code; } protected: MessageLoop message_loop_; net::TestURLFetcherFactory url_fetcher_factory_; - content::SpeechRecognitionErrorCode error_; - content::SpeechRecognitionResult result_; + SpeechRecognitionErrorCode error_; + SpeechRecognitionResult result_; }; void GoogleOneShotRemoteEngineTest::CreateAndTestRequest( @@ -76,7 +75,7 @@ TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) { CreateAndTestRequest(true, "{\"status\":0,\"hypotheses\":" "[{\"utterance\":\"123456\",\"confidence\":0.9}]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NONE); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE); EXPECT_EQ(1U, result_.hypotheses.size()); EXPECT_EQ(ASCIIToUTF16("123456"), result_.hypotheses[0].utterance); EXPECT_EQ(0.9, result_.hypotheses[0].confidence); @@ -86,7 +85,7 @@ TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) { "{\"status\":0,\"hypotheses\":[" "{\"utterance\":\"hello\",\"confidence\":0.9}," "{\"utterance\":\"123456\",\"confidence\":0.5}]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NONE); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE); EXPECT_EQ(2u, result_.hypotheses.size()); EXPECT_EQ(ASCIIToUTF16("hello"), result_.hypotheses[0].utterance); EXPECT_EQ(0.9, result_.hypotheses[0].confidence); @@ -95,29 +94,29 @@ TEST_F(GoogleOneShotRemoteEngineTest, BasicTest) { // Zero results. CreateAndTestRequest(true, "{\"status\":0,\"hypotheses\":[]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NONE); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NONE); EXPECT_EQ(0U, result_.hypotheses.size()); // Http failure case. 
CreateAndTestRequest(false, ""); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NETWORK); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK); EXPECT_EQ(0U, result_.hypotheses.size()); // Invalid status case. CreateAndTestRequest(true, "{\"status\":\"invalid\",\"hypotheses\":[]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NETWORK); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK); EXPECT_EQ(0U, result_.hypotheses.size()); // Server-side error case. CreateAndTestRequest(true, "{\"status\":1,\"hypotheses\":[]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NETWORK); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK); EXPECT_EQ(0U, result_.hypotheses.size()); // Malformed JSON case. CreateAndTestRequest(true, "{\"status\":0,\"hypotheses\":" "[{\"unknownkey\":\"hello\"}]}"); - EXPECT_EQ(error_, content::SPEECH_RECOGNITION_ERROR_NETWORK); + EXPECT_EQ(error_, SPEECH_RECOGNITION_ERROR_NETWORK); EXPECT_EQ(0U, result_.hypotheses.size()); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/google_streaming_remote_engine.cc b/content/browser/speech/google_streaming_remote_engine.cc index 66bd9e6..2f050bd 100644 --- a/content/browser/speech/google_streaming_remote_engine.cc +++ b/content/browser/speech/google_streaming_remote_engine.cc @@ -27,13 +27,9 @@ #include "net/url_request/url_request_context.h" #include "net/url_request/url_request_status.h" -using content::BrowserThread; -using content::SpeechRecognitionError; -using content::SpeechRecognitionErrorCode; -using content::SpeechRecognitionHypothesis; -using content::SpeechRecognitionResult; using net::URLFetcher; +namespace content { namespace { const char kWebServiceBaseUrl[] = @@ -41,8 +37,7 @@ const char kWebServiceBaseUrl[] = const char kDownstreamUrl[] = "/down?"; const char kUpstreamUrl[] = "/up?"; const int kAudioPacketIntervalMs = 100; -const speech::AudioEncoder::Codec kDefaultAudioCodec = - speech::AudioEncoder::CODEC_FLAC; +const 
AudioEncoder::Codec kDefaultAudioCodec = AudioEncoder::CODEC_FLAC; // This mathces the maximum maxAlternatives value supported by the server. const uint32 kMaxMaxAlternatives = 30; @@ -50,7 +45,7 @@ const uint32 kMaxMaxAlternatives = 30; // TODO(hans): Remove this and other logging when we don't need it anymore. void DumpResponse(const std::string& response) { DVLOG(1) << "------------"; - speech::proto::SpeechRecognitionEvent event; + proto::SpeechRecognitionEvent event; if (!event.ParseFromString(response)) { DVLOG(1) << "Parse failed!"; return; @@ -59,13 +54,13 @@ void DumpResponse(const std::string& response) { DVLOG(1) << "STATUS\t" << event.status(); for (int i = 0; i < event.result_size(); ++i) { DVLOG(1) << "RESULT #" << i << ":"; - const speech::proto::SpeechRecognitionResult& res = event.result(i); + const proto::SpeechRecognitionResult& res = event.result(i); if (res.has_final()) DVLOG(1) << " FINAL:\t" << res.final(); if (res.has_stability()) DVLOG(1) << " STABILITY:\t" << res.stability(); for (int j = 0; j < res.alternative_size(); ++j) { - const speech::proto::SpeechRecognitionAlternative& alt = + const proto::SpeechRecognitionAlternative& alt = res.alternative(j); if (alt.has_confidence()) DVLOG(1) << " CONFIDENCE:\t" << alt.confidence(); @@ -92,8 +87,6 @@ std::string GetAPIKey() { } // namespace -namespace speech { - const int GoogleStreamingRemoteEngine::kUpstreamUrlFetcherIdForTests = 0; const int GoogleStreamingRemoteEngine::kDownstreamUrlFetcherIdForTests = 1; const int GoogleStreamingRemoteEngine::kWebserviceStatusNoError = 0; @@ -422,24 +415,24 @@ GoogleStreamingRemoteEngine::ProcessDownstreamResponse( case proto::SpeechRecognitionEvent::STATUS_SUCCESS: break; case proto::SpeechRecognitionEvent::STATUS_NO_SPEECH: - return Abort(content::SPEECH_RECOGNITION_ERROR_NO_SPEECH); + return Abort(SPEECH_RECOGNITION_ERROR_NO_SPEECH); case proto::SpeechRecognitionEvent::STATUS_ABORTED: - return Abort(content::SPEECH_RECOGNITION_ERROR_ABORTED); + return 
Abort(SPEECH_RECOGNITION_ERROR_ABORTED); case proto::SpeechRecognitionEvent::STATUS_AUDIO_CAPTURE: - return Abort(content::SPEECH_RECOGNITION_ERROR_AUDIO); + return Abort(SPEECH_RECOGNITION_ERROR_AUDIO); case proto::SpeechRecognitionEvent::STATUS_NETWORK: - return Abort(content::SPEECH_RECOGNITION_ERROR_NETWORK); + return Abort(SPEECH_RECOGNITION_ERROR_NETWORK); case proto::SpeechRecognitionEvent::STATUS_NOT_ALLOWED: // TODO(hans): We need a better error code for this. - return Abort(content::SPEECH_RECOGNITION_ERROR_ABORTED); + return Abort(SPEECH_RECOGNITION_ERROR_ABORTED); case proto::SpeechRecognitionEvent::STATUS_SERVICE_NOT_ALLOWED: // TODO(hans): We need a better error code for this. - return Abort(content::SPEECH_RECOGNITION_ERROR_ABORTED); + return Abort(SPEECH_RECOGNITION_ERROR_ABORTED); case proto::SpeechRecognitionEvent::STATUS_BAD_GRAMMAR: - return Abort(content::SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR); + return Abort(SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR); case proto::SpeechRecognitionEvent::STATUS_LANGUAGE_NOT_SUPPORTED: // TODO(hans): We need a better error code for this. 
- return Abort(content::SPEECH_RECOGNITION_ERROR_ABORTED); + return Abort(SPEECH_RECOGNITION_ERROR_ABORTED); } } @@ -524,19 +517,19 @@ GoogleStreamingRemoteEngine::CloseDownstream(const FSMEventArgs&) { GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::AbortSilently(const FSMEventArgs&) { - return Abort(content::SPEECH_RECOGNITION_ERROR_NONE); + return Abort(SPEECH_RECOGNITION_ERROR_NONE); } GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::AbortWithError(const FSMEventArgs&) { - return Abort(content::SPEECH_RECOGNITION_ERROR_NETWORK); + return Abort(SPEECH_RECOGNITION_ERROR_NETWORK); } GoogleStreamingRemoteEngine::FSMState GoogleStreamingRemoteEngine::Abort( SpeechRecognitionErrorCode error_code) { DVLOG(1) << "Aborting with error " << error_code; - if (error_code != content::SPEECH_RECOGNITION_ERROR_NONE) { + if (error_code != SPEECH_RECOGNITION_ERROR_NONE) { delegate()->OnSpeechRecognitionEngineError( SpeechRecognitionError(error_code)); } @@ -596,4 +589,4 @@ GoogleStreamingRemoteEngine::FSMEventArgs::FSMEventArgs(FSMEvent event_value) GoogleStreamingRemoteEngine::FSMEventArgs::~FSMEventArgs() { } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/google_streaming_remote_engine.h b/content/browser/speech/google_streaming_remote_engine.h index df46b4a..7a60895 100644 --- a/content/browser/speech/google_streaming_remote_engine.h +++ b/content/browser/speech/google_streaming_remote_engine.h @@ -20,18 +20,15 @@ #include "googleurl/src/gurl.h" #include "net/url_request/url_fetcher_delegate.h" -namespace content { -struct SpeechRecognitionError; -struct SpeechRecognitionResult; -} - namespace net { class URLRequestContextGetter; } -namespace speech { +namespace content { class AudioChunk; +struct SpeechRecognitionError; +struct SpeechRecognitionResult; // Implements a SpeechRecognitionEngine supporting continuous recognition by // means of interaction with Google streaming speech recognition webservice. 
@@ -138,7 +135,7 @@ class CONTENT_EXPORT GoogleStreamingRemoteEngine FSMState CloseDownstream(const FSMEventArgs& event_args); FSMState AbortSilently(const FSMEventArgs& event_args); FSMState AbortWithError(const FSMEventArgs& event_args); - FSMState Abort(content::SpeechRecognitionErrorCode error); + FSMState Abort(SpeechRecognitionErrorCode error); FSMState DoNothing(const FSMEventArgs& event_args); FSMState NotFeasible(const FSMEventArgs& event_args); @@ -159,6 +156,6 @@ class CONTENT_EXPORT GoogleStreamingRemoteEngine DISALLOW_COPY_AND_ASSIGN(GoogleStreamingRemoteEngine); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_GOOGLE_STREAMING_REMOTE_ENGINE_H_ diff --git a/content/browser/speech/google_streaming_remote_engine_unittest.cc b/content/browser/speech/google_streaming_remote_engine_unittest.cc index 8956d9a..3aa91c8 100644 --- a/content/browser/speech/google_streaming_remote_engine_unittest.cc +++ b/content/browser/speech/google_streaming_remote_engine_unittest.cc @@ -17,24 +17,21 @@ #include "net/url_request/url_request_status.h" #include "testing/gtest/include/gtest/gtest.h" -using content::SpeechRecognitionHypothesis; -using content::SpeechRecognitionResult; using net::URLRequestStatus; using net::TestURLFetcher; using net::TestURLFetcherFactory; -namespace speech { +namespace content { // Note: the terms upstream and downstream are from the point-of-view of the // client (engine_under_test_). -class GoogleStreamingRemoteEngineTest - : public SpeechRecognitionEngineDelegate, - public testing::Test { +class GoogleStreamingRemoteEngineTest : public SpeechRecognitionEngineDelegate, + public testing::Test { public: GoogleStreamingRemoteEngineTest() : last_number_of_upstream_chunks_seen_(0U), - error_(content::SPEECH_RECOGNITION_ERROR_NONE) { } + error_(SPEECH_RECOGNITION_ERROR_NONE) { } // Creates a speech recognition request and invokes its URL fetcher delegate // with the given test data. 
@@ -46,7 +43,7 @@ class GoogleStreamingRemoteEngineTest results_.push(result); } virtual void OnSpeechRecognitionEngineError( - const content::SpeechRecognitionError& error) OVERRIDE { + const SpeechRecognitionError& error) OVERRIDE { error_ = error.code; } @@ -84,7 +81,7 @@ class GoogleStreamingRemoteEngineTest size_t last_number_of_upstream_chunks_seen_; MessageLoop message_loop_; std::string response_buffer_; - content::SpeechRecognitionErrorCode error_; + SpeechRecognitionErrorCode error_; std::queue<SpeechRecognitionResult> results_; }; @@ -122,7 +119,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, SingleDefinitiveResult) { CloseMockDownstream(DOWNSTREAM_ERROR_NONE); ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); EndMockRecognition(); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); ASSERT_EQ(0U, results_.size()); } @@ -164,7 +161,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, SeveralStreamingResults) { CloseMockDownstream(DOWNSTREAM_ERROR_NONE); ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); EndMockRecognition(); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); ASSERT_EQ(0U, results_.size()); } @@ -199,7 +196,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, NoFinalResultAfterAudioChunksEnded) { // Ensure everything is closed cleanly after the downstream is closed. ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); EndMockRecognition(); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); ASSERT_EQ(0U, results_.size()); } @@ -246,7 +243,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, HTTPError) { // Expect a SPEECH_RECOGNITION_ERROR_NETWORK error to be raised. 
ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); EndMockRecognition(); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NETWORK, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NETWORK, error_); ASSERT_EQ(0U, results_.size()); } @@ -264,7 +261,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, NetworkError) { // Expect a SPEECH_RECOGNITION_ERROR_NETWORK error to be raised. ASSERT_FALSE(engine_under_test_->IsRecognitionPending()); EndMockRecognition(); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NETWORK, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NETWORK, error_); ASSERT_EQ(0U, results_.size()); } @@ -309,7 +306,7 @@ TEST_F(GoogleStreamingRemoteEngineTest, Stability) { // Since there was no final result, we get an empty "no match" result. SpeechRecognitionResult empty_result; ExpectResultReceived(empty_result); - ASSERT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + ASSERT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); ASSERT_EQ(0U, results_.size()); } @@ -483,4 +480,4 @@ std::string GoogleStreamingRemoteEngineTest::ToBigEndian32(uint32 value) { return std::string(raw_data, sizeof(raw_data)); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/input_tag_speech_dispatcher_host.cc b/content/browser/speech/input_tag_speech_dispatcher_host.cc index fa8d405..32ba964 100644 --- a/content/browser/speech/input_tag_speech_dispatcher_host.cc +++ b/content/browser/speech/input_tag_speech_dispatcher_host.cc @@ -12,15 +12,11 @@ #include "content/public/browser/speech_recognition_session_config.h" #include "content/public/browser/speech_recognition_session_context.h" -using content::SpeechRecognitionManager; -using content::SpeechRecognitionSessionConfig; -using content::SpeechRecognitionSessionContext; - namespace { const uint32 kMaxHypothesesForSpeechInputTag = 6; } -namespace speech { +namespace content { SpeechRecognitionManager* InputTagSpeechDispatcherHost::manager_for_tests_; void InputTagSpeechDispatcherHost::SetManagerForTests( @@ 
-31,7 +27,7 @@ void InputTagSpeechDispatcherHost::SetManagerForTests( InputTagSpeechDispatcherHost::InputTagSpeechDispatcherHost( int render_process_id, net::URLRequestContextGetter* url_request_context_getter, - content::SpeechRecognitionPreferences* recognition_preferences) + SpeechRecognitionPreferences* recognition_preferences) : render_process_id_(render_process_id), url_request_context_getter_(url_request_context_getter), recognition_preferences_(recognition_preferences) { @@ -78,8 +74,7 @@ void InputTagSpeechDispatcherHost::OnStartRecognition( SpeechRecognitionSessionConfig config; config.language = params.language; if (!params.grammar.empty()) { - config.grammars.push_back( - content::SpeechRecognitionGrammar(params.grammar)); + config.grammars.push_back(SpeechRecognitionGrammar(params.grammar)); } config.max_hypotheses = kMaxHypothesesForSpeechInputTag; config.origin_url = params.origin_url; @@ -93,7 +88,7 @@ void InputTagSpeechDispatcherHost::OnStartRecognition( config.event_listener = this; int session_id = manager()->CreateSession(config); - DCHECK_NE(session_id, content::SpeechRecognitionManager::kSessionIDInvalid); + DCHECK_NE(session_id, SpeechRecognitionManager::kSessionIDInvalid); manager()->StartSession(session_id); } @@ -123,7 +118,8 @@ void InputTagSpeechDispatcherHost::OnStopRecording(int render_view_id, // -------- SpeechRecognitionEventListener interface implementation ----------- void InputTagSpeechDispatcherHost::OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) { + int session_id, + const SpeechRecognitionResult& result) { VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResult enter"; const SpeechRecognitionSessionContext& context = @@ -162,10 +158,11 @@ void InputTagSpeechDispatcherHost::OnAudioStart(int session_id) {} void InputTagSpeechDispatcherHost::OnSoundStart(int session_id) {} void InputTagSpeechDispatcherHost::OnSoundEnd(int session_id) {} void 
InputTagSpeechDispatcherHost::OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) {} + int session_id, + const SpeechRecognitionError& error) {} void InputTagSpeechDispatcherHost::OnAudioLevelsChange( int session_id, float volume, float noise_volume) {} void InputTagSpeechDispatcherHost::OnEnvironmentEstimationComplete( int session_id) {} -} // namespace speech +} // namespace content diff --git a/content/browser/speech/input_tag_speech_dispatcher_host.h b/content/browser/speech/input_tag_speech_dispatcher_host.h index 896da27..d1e99e4 100644 --- a/content/browser/speech/input_tag_speech_dispatcher_host.h +++ b/content/browser/speech/input_tag_speech_dispatcher_host.h @@ -15,25 +15,23 @@ struct InputTagSpeechHostMsg_StartRecognition_Params; namespace content { + class SpeechRecognitionManager; class SpeechRecognitionPreferences; struct SpeechRecognitionResult; -} - -namespace speech { // InputTagSpeechDispatcherHost is a delegate for Speech API messages used by // RenderMessageFilter. Basically it acts as a proxy, relaying the events coming // from the SpeechRecognitionManager to IPC messages (and vice versa). // It's the complement of SpeechRecognitionDispatcher (owned by RenderView). class CONTENT_EXPORT InputTagSpeechDispatcherHost - : public content::BrowserMessageFilter, - public content::SpeechRecognitionEventListener { + : public BrowserMessageFilter, + public SpeechRecognitionEventListener { public: InputTagSpeechDispatcherHost( int render_process_id, net::URLRequestContextGetter* url_request_context_getter, - content::SpeechRecognitionPreferences* recognition_preferences); + SpeechRecognitionPreferences* recognition_preferences); // SpeechRecognitionEventListener methods. 
virtual void OnRecognitionStart(int session_id) OVERRIDE; @@ -44,18 +42,21 @@ class CONTENT_EXPORT InputTagSpeechDispatcherHost virtual void OnAudioEnd(int session_id) OVERRIDE; virtual void OnRecognitionEnd(int session_id) OVERRIDE; virtual void OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) OVERRIDE; + int session_id, + const SpeechRecognitionResult& result) OVERRIDE; virtual void OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) OVERRIDE; - virtual void OnAudioLevelsChange( - int session_id, float volume, float noise_volume) OVERRIDE; + int session_id, + const SpeechRecognitionError& error) OVERRIDE; + virtual void OnAudioLevelsChange(int session_id, + float volume, + float noise_volume) OVERRIDE; - // content::BrowserMessageFilter implementation. + // BrowserMessageFilter implementation. virtual bool OnMessageReceived(const IPC::Message& message, bool* message_was_ok) OVERRIDE; // Singleton manager setter useful for tests. - static void SetManagerForTests(content::SpeechRecognitionManager* manager); + static void SetManagerForTests(SpeechRecognitionManager* manager); private: virtual ~InputTagSpeechDispatcherHost(); @@ -67,17 +68,17 @@ class CONTENT_EXPORT InputTagSpeechDispatcherHost // Returns the speech recognition manager to forward events to, creating one // if needed. 
- content::SpeechRecognitionManager* manager(); + SpeechRecognitionManager* manager(); int render_process_id_; scoped_refptr<net::URLRequestContextGetter> url_request_context_getter_; - scoped_refptr<content::SpeechRecognitionPreferences> recognition_preferences_; + scoped_refptr<SpeechRecognitionPreferences> recognition_preferences_; - static content::SpeechRecognitionManager* manager_for_tests_; + static SpeechRecognitionManager* manager_for_tests_; DISALLOW_COPY_AND_ASSIGN(InputTagSpeechDispatcherHost); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_INPUT_TAG_SPEECH_DISPATCHER_HOST_H_ diff --git a/content/browser/speech/proto/google_streaming_api.proto b/content/browser/speech/proto/google_streaming_api.proto index 292a2e4..314051c 100644 --- a/content/browser/speech/proto/google_streaming_api.proto +++ b/content/browser/speech/proto/google_streaming_api.proto @@ -8,7 +8,7 @@ option optimize_for = LITE_RUNTIME; // TODO(hans): Commented out due to compilation errors. // option cc_api_version = 2; -package speech.proto; +package content.proto; // SpeechRecognitionEvent is the only message type sent to client. 
// diff --git a/content/browser/speech/speech_recognition_browsertest.cc b/content/browser/speech/speech_recognition_browsertest.cc index f67a1c8..9986459 100644 --- a/content/browser/speech/speech_recognition_browsertest.cc +++ b/content/browser/speech/speech_recognition_browsertest.cc @@ -28,21 +28,11 @@ #include "content/test/content_browser_test_utils.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebInputEvent.h" -using content::NavigationController; -using content::SpeechRecognitionEventListener; -using content::SpeechRecognitionSessionConfig; -using content::SpeechRecognitionSessionContext; -using content::WebContents; - -namespace speech { -class FakeSpeechRecognitionManager; -} - -namespace speech { +namespace content { const char kTestResult[] = "Pictures of the moon"; -class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { +class FakeSpeechRecognitionManager : public SpeechRecognitionManager { public: FakeSpeechRecognitionManager() : session_id_(0), @@ -74,7 +64,7 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { // SpeechRecognitionManager methods. 
virtual int CreateSession( - const content::SpeechRecognitionSessionConfig& config) OVERRIDE { + const SpeechRecognitionSessionConfig& config) OVERRIDE { VLOG(1) << "FAKE CreateSession invoked."; EXPECT_EQ(0, session_id_); EXPECT_EQ(NULL, listener_); @@ -120,7 +110,7 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { } virtual void AbortAllSessionsForListener( - content::SpeechRecognitionEventListener* listener) OVERRIDE { + SpeechRecognitionEventListener* listener) OVERRIDE { VLOG(1) << "CancelAllRequestsWithDelegate invoked."; // listener_ is set to NULL if a fake result was received (see below), so // check that listener_ matches the incoming parameter only when there is @@ -152,7 +142,7 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { return session_config_; } - virtual content::SpeechRecognitionSessionContext GetSessionContext( + virtual SpeechRecognitionSessionContext GetSessionContext( int session_id) const OVERRIDE { EXPECT_EQ(session_id, session_id_); return session_ctx_; @@ -163,8 +153,8 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { if (session_id_) { // Do a check in case we were cancelled.. 
VLOG(1) << "Setting fake recognition result."; listener_->OnAudioEnd(session_id_); - content::SpeechRecognitionResult results; - results.hypotheses.push_back(content::SpeechRecognitionHypothesis( + SpeechRecognitionResult results; + results.hypotheses.push_back(SpeechRecognitionHypothesis( ASCIIToUTF16(kTestResult), 1.0)); listener_->OnRecognitionResult(session_id_, results); listener_->OnRecognitionEnd(session_id_); @@ -184,7 +174,7 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { base::WaitableEvent recognition_started_event_; }; -class SpeechRecognitionBrowserTest : public content::ContentBrowserTest { +class SpeechRecognitionBrowserTest : public ContentBrowserTest { public: // ContentBrowserTest methods virtual void SetUpCommandLine(CommandLine* command_line) { @@ -196,8 +186,8 @@ class SpeechRecognitionBrowserTest : public content::ContentBrowserTest { // The test page calculates the speech button's coordinate in the page on // load & sets that coordinate in the URL fragment. We send mouse down & up // events at that coordinate to trigger speech recognition. 
- GURL test_url = content::GetTestUrl("speech", filename); - content::NavigateToURL(shell(), test_url); + GURL test_url = GetTestUrl("speech", filename); + NavigateToURL(shell(), test_url); WebKit::WebMouseEvent mouse_event; mouse_event.type = WebKit::WebInputEvent::MouseDown; @@ -207,9 +197,9 @@ class SpeechRecognitionBrowserTest : public content::ContentBrowserTest { mouse_event.clickCount = 1; WebContents* web_contents = shell()->web_contents(); - content::WindowedNotificationObserver observer( - content::NOTIFICATION_LOAD_STOP, - content::Source<NavigationController>(&web_contents->GetController())); + WindowedNotificationObserver observer( + NOTIFICATION_LOAD_STOP, + Source<NavigationController>(&web_contents->GetController())); web_contents->GetRenderViewHost()->ForwardMouseEvent(mouse_event); mouse_event.type = WebKit::WebInputEvent::MouseUp; web_contents->GetRenderViewHost()->ForwardMouseEvent(mouse_event); @@ -250,10 +240,10 @@ class SpeechRecognitionBrowserTest : public content::ContentBrowserTest { // This is used by the static |fakeManager|, and it is a pointer rather than a // direct instance per the style guide. - static content::SpeechRecognitionManager* speech_recognition_manager_; + static SpeechRecognitionManager* speech_recognition_manager_; }; -content::SpeechRecognitionManager* +SpeechRecognitionManager* SpeechRecognitionBrowserTest::speech_recognition_manager_ = NULL; // TODO(satish): Once this flakiness has been fixed, add a second test here to @@ -288,9 +278,9 @@ IN_PROC_BROWSER_TEST_F(SpeechRecognitionBrowserTest, DISABLED_TestCancelAll) { // Make the renderer crash. This should trigger // InputTagSpeechDispatcherHost to cancel all pending sessions. 
- content::NavigateToURL(shell(), GURL(chrome::kChromeUICrashURL)); + NavigateToURL(shell(), GURL(chrome::kChromeUICrashURL)); EXPECT_TRUE(fake_speech_recognition_manager_.did_cancel_all()); } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/speech_recognition_dispatcher_host.cc b/content/browser/speech/speech_recognition_dispatcher_host.cc index c0391ec..6a398f9 100644 --- a/content/browser/speech/speech_recognition_dispatcher_host.cc +++ b/content/browser/speech/speech_recognition_dispatcher_host.cc @@ -14,11 +14,7 @@ #include "content/public/browser/speech_recognition_session_context.h" #include "content/public/common/content_switches.h" -using content::SpeechRecognitionManager; -using content::SpeechRecognitionSessionConfig; -using content::SpeechRecognitionSessionContext; - -namespace speech { +namespace content { SpeechRecognitionManager* SpeechRecognitionDispatcherHost::manager_for_tests_; void SpeechRecognitionDispatcherHost::SetManagerForTests( @@ -29,7 +25,7 @@ void SpeechRecognitionDispatcherHost::SetManagerForTests( SpeechRecognitionDispatcherHost::SpeechRecognitionDispatcherHost( int render_process_id, net::URLRequestContextGetter* context_getter, - content::SpeechRecognitionPreferences* recognition_preferences) + SpeechRecognitionPreferences* recognition_preferences) : render_process_id_(render_process_id), context_getter_(context_getter), recognition_preferences_(recognition_preferences) { @@ -97,7 +93,7 @@ void SpeechRecognitionDispatcherHost::OnStartRequest( config.event_listener = this; int session_id = manager()->CreateSession(config); - DCHECK_NE(session_id, content::SpeechRecognitionManager::kSessionIDInvalid); + DCHECK_NE(session_id, SpeechRecognitionManager::kSessionIDInvalid); manager()->StartSession(session_id); } @@ -109,7 +105,7 @@ void SpeechRecognitionDispatcherHost::OnAbortRequest(int render_view_id, // The renderer might provide an invalid |request_id| if the session was not // started as expected, 
e.g., due to unsatisfied security requirements. - if (session_id != content::SpeechRecognitionManager::kSessionIDInvalid) + if (session_id != SpeechRecognitionManager::kSessionIDInvalid) manager()->AbortSession(session_id); } @@ -121,7 +117,7 @@ void SpeechRecognitionDispatcherHost::OnStopCaptureRequest( // The renderer might provide an invalid |request_id| if the session was not // started as expected, e.g., due to unsatisfied security requirements. - if (session_id != content::SpeechRecognitionManager::kSessionIDInvalid) + if (session_id != SpeechRecognitionManager::kSessionIDInvalid) manager()->StopAudioCaptureForSession(session_id); } @@ -170,7 +166,8 @@ void SpeechRecognitionDispatcherHost::OnRecognitionEnd(int session_id) { } void SpeechRecognitionDispatcherHost::OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) { + int session_id, + const SpeechRecognitionResult& result) { const SpeechRecognitionSessionContext& context = manager()->GetSessionContext(session_id); Send(new SpeechRecognitionMsg_ResultRetrieved(context.render_view_id, @@ -179,7 +176,8 @@ void SpeechRecognitionDispatcherHost::OnRecognitionResult( } void SpeechRecognitionDispatcherHost::OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) { + int session_id, + const SpeechRecognitionError& error) { const SpeechRecognitionSessionContext& context = manager()->GetSessionContext(session_id); Send(new SpeechRecognitionMsg_ErrorOccurred(context.render_view_id, @@ -188,9 +186,13 @@ void SpeechRecognitionDispatcherHost::OnRecognitionError( } // The events below are currently not used by speech JS APIs implementation. 
-void SpeechRecognitionDispatcherHost::OnAudioLevelsChange( - int session_id, float volume, float noise_volume) {} +void SpeechRecognitionDispatcherHost::OnAudioLevelsChange(int session_id, + float volume, + float noise_volume) { +} + void SpeechRecognitionDispatcherHost::OnEnvironmentEstimationComplete( - int session_id) {} + int session_id) { +} -} // namespace speech +} // namespace content diff --git a/content/browser/speech/speech_recognition_dispatcher_host.h b/content/browser/speech/speech_recognition_dispatcher_host.h index ee381ce..df14953 100644 --- a/content/browser/speech/speech_recognition_dispatcher_host.h +++ b/content/browser/speech/speech_recognition_dispatcher_host.h @@ -14,25 +14,23 @@ struct SpeechRecognitionHostMsg_StartRequest_Params; namespace content { + class SpeechRecognitionManager; class SpeechRecognitionPreferences; struct SpeechRecognitionResult; -} - -namespace speech { // SpeechRecognitionDispatcherHost is a delegate for Speech API messages used by // RenderMessageFilter. Basically it acts as a proxy, relaying the events coming // from the SpeechRecognitionManager to IPC messages (and vice versa). // It's the complement of SpeechRecognitionDispatcher (owned by RenderView). class CONTENT_EXPORT SpeechRecognitionDispatcherHost - : public content::BrowserMessageFilter, - public content::SpeechRecognitionEventListener { + : public BrowserMessageFilter, + public SpeechRecognitionEventListener { public: SpeechRecognitionDispatcherHost( int render_process_id, net::URLRequestContextGetter* context_getter, - content::SpeechRecognitionPreferences* recognition_preferences); + SpeechRecognitionPreferences* recognition_preferences); // SpeechRecognitionEventListener methods. 
virtual void OnRecognitionStart(int session_id) OVERRIDE; @@ -43,18 +41,21 @@ class CONTENT_EXPORT SpeechRecognitionDispatcherHost virtual void OnAudioEnd(int session_id) OVERRIDE; virtual void OnRecognitionEnd(int session_id) OVERRIDE; virtual void OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) OVERRIDE; + int session_id, + const SpeechRecognitionResult& result) OVERRIDE; virtual void OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) OVERRIDE; - virtual void OnAudioLevelsChange( - int session_id, float volume, float noise_volume) OVERRIDE; + int session_id, + const SpeechRecognitionError& error) OVERRIDE; + virtual void OnAudioLevelsChange(int session_id, + float volume, + float noise_volume) OVERRIDE; - // content::BrowserMessageFilter implementation. + // BrowserMessageFilter implementation. virtual bool OnMessageReceived(const IPC::Message& message, bool* message_was_ok) OVERRIDE; // Singleton manager setter useful for tests. - static void SetManagerForTests(content::SpeechRecognitionManager* manager); + static void SetManagerForTests(SpeechRecognitionManager* manager); private: virtual ~SpeechRecognitionDispatcherHost(); @@ -65,17 +66,17 @@ class CONTENT_EXPORT SpeechRecognitionDispatcherHost void OnStopCaptureRequest(int render_view_id, int request_id); // Returns the speech recognition manager to forward requests to. 
- content::SpeechRecognitionManager* manager(); + SpeechRecognitionManager* manager(); int render_process_id_; scoped_refptr<net::URLRequestContextGetter> context_getter_; - scoped_refptr<content::SpeechRecognitionPreferences> recognition_preferences_; + scoped_refptr<SpeechRecognitionPreferences> recognition_preferences_; - static content::SpeechRecognitionManager* manager_for_tests_; + static SpeechRecognitionManager* manager_for_tests_; DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcherHost); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_DISPATCHER_HOST_H_ diff --git a/content/browser/speech/speech_recognition_engine.cc b/content/browser/speech/speech_recognition_engine.cc index 94311a1..53f0e91 100644 --- a/content/browser/speech/speech_recognition_engine.cc +++ b/content/browser/speech/speech_recognition_engine.cc @@ -10,7 +10,7 @@ const int kDefaultConfigBitsPerSample = 16; const uint32 kDefaultMaxHypotheses = 1; } // namespace -namespace speech { +namespace content { SpeechRecognitionEngine::Config::Config() : filter_profanities(false), @@ -24,4 +24,4 @@ SpeechRecognitionEngine::Config::Config() SpeechRecognitionEngine::Config::~Config() { } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/speech_recognition_engine.h b/content/browser/speech/speech_recognition_engine.h index 0e90ec8..abd94e9 100644 --- a/content/browser/speech/speech_recognition_engine.h +++ b/content/browser/speech/speech_recognition_engine.h @@ -12,13 +12,10 @@ #include "content/public/common/speech_recognition_grammar.h" namespace content { -struct SpeechRecognitionResult; -struct SpeechRecognitionError; -} - -namespace speech { class AudioChunk; +struct SpeechRecognitionResult; +struct SpeechRecognitionError; // This interface models the basic contract that a speech recognition engine, // either working locally or relying on a remote web-service, must obey. 
@@ -39,9 +36,9 @@ class SpeechRecognitionEngine { // (e.g., in the case of continuous speech recognition engine // implementations). virtual void OnSpeechRecognitionEngineResult( - const content::SpeechRecognitionResult& result) = 0; + const SpeechRecognitionResult& result) = 0; virtual void OnSpeechRecognitionEngineError( - const content::SpeechRecognitionError& error) = 0; + const SpeechRecognitionError& error) = 0; protected: virtual ~Delegate() {} @@ -53,7 +50,7 @@ class SpeechRecognitionEngine { ~Config(); std::string language; - content::SpeechRecognitionGrammarArray grammars; + SpeechRecognitionGrammarArray grammars; bool filter_profanities; bool continuous; bool interim_results; @@ -110,6 +107,6 @@ class SpeechRecognitionEngine { typedef SpeechRecognitionEngine::Delegate SpeechRecognitionEngineDelegate; typedef SpeechRecognitionEngine::Config SpeechRecognitionEngineConfig; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_ENGINE_H_ diff --git a/content/browser/speech/speech_recognition_manager_impl.cc b/content/browser/speech/speech_recognition_manager_impl.cc index be99298..16624b0 100644 --- a/content/browser/speech/speech_recognition_manager_impl.cc +++ b/content/browser/speech/speech_recognition_manager_impl.cc @@ -23,22 +23,12 @@ #include "media/audio/audio_manager.h" using base::Callback; -using content::BrowserMainLoop; -using content::BrowserThread; -using content::SpeechRecognitionEventListener; -using content::SpeechRecognitionManager; -using content::SpeechRecognitionResult; -using content::SpeechRecognitionSessionContext; -using content::SpeechRecognitionSessionConfig; namespace content { -SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() { - return speech::SpeechRecognitionManagerImpl::GetInstance(); -} -} // namespace content namespace { -speech::SpeechRecognitionManagerImpl* g_speech_recognition_manager_impl; + +SpeechRecognitionManagerImpl* g_speech_recognition_manager_impl; 
void ShowAudioInputSettingsOnFileThread() { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::FILE)); @@ -47,9 +37,12 @@ void ShowAudioInputSettingsOnFileThread() { if (audio_manager->CanShowAudioInputSettings()) audio_manager->ShowAudioInputSettings(); } + } // namespace -namespace speech { +SpeechRecognitionManager* SpeechRecognitionManager::GetInstance() { + return SpeechRecognitionManagerImpl::GetInstance(); +} #if !defined(OS_IOS) class SpeechRecognitionManagerImpl::PermissionRequest @@ -74,8 +67,8 @@ class SpeechRecognitionManagerImpl::PermissionRequest this, render_process_id, render_view_id, - media_stream::StreamOptions(content::MEDIA_DEVICE_AUDIO_CAPTURE, - content::MEDIA_DEVICE_VIDEO_CAPTURE), + media_stream::StreamOptions(MEDIA_DEVICE_AUDIO_CAPTURE, + MEDIA_DEVICE_VIDEO_CAPTURE), origin, &label_); } @@ -136,7 +129,7 @@ SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl() : primary_session_id_(kSessionIDInvalid), last_session_id_(kSessionIDInvalid), is_dispatching_event_(false), - delegate_(content::GetContentClient()->browser()-> + delegate_(GetContentClient()->browser()-> GetSpeechRecognitionManagerDelegate()), ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) { DCHECK(!g_speech_recognition_manager_impl); @@ -261,8 +254,8 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_START)); } else { - OnRecognitionError(session_id, content::SpeechRecognitionError( - content::SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); + OnRecognitionError(session_id, SpeechRecognitionError( + SPEECH_RECOGNITION_ERROR_NOT_ALLOWED)); MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); @@ -385,7 +378,7 @@ void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) { } void SpeechRecognitionManagerImpl::OnRecognitionResult( - int session_id, 
const content::SpeechRecognitionResult& result) { + int session_id, const SpeechRecognitionResult& result) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); if (!SessionExists(session_id)) return; @@ -397,7 +390,7 @@ void SpeechRecognitionManagerImpl::OnRecognitionResult( } void SpeechRecognitionManagerImpl::OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) { + int session_id, const SpeechRecognitionError& error) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); if (!SessionExists(session_id)) return; @@ -686,4 +679,4 @@ SpeechRecognitionManagerImpl::Session::Session() SpeechRecognitionManagerImpl::Session::~Session() { } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/speech_recognition_manager_impl.h b/content/browser/speech/speech_recognition_manager_impl.h index c5fb75e..a49e993 100644 --- a/content/browser/speech/speech_recognition_manager_impl.h +++ b/content/browser/speech/speech_recognition_manager_impl.h @@ -19,17 +19,14 @@ #include "content/public/browser/speech_recognition_session_context.h" #include "content/public/common/speech_recognition_error.h" -namespace content { -class BrowserMainLoop; -class SpeechRecognitionManagerDelegate; -} - namespace media_stream { class MediaStreamManager; } -namespace speech { +namespace content { +class BrowserMainLoop; +class SpeechRecognitionManagerDelegate; class SpeechRecognizer; // This is the manager for speech recognition. It is a single instance in @@ -52,8 +49,8 @@ class SpeechRecognizer; // - Relays also recognition results/status/error events of every session to // the catch-all snoop listener (optionally) provided by the delegate. 
class CONTENT_EXPORT SpeechRecognitionManagerImpl : - public NON_EXPORTED_BASE(content::SpeechRecognitionManager), - public content::SpeechRecognitionEventListener { + public NON_EXPORTED_BASE(SpeechRecognitionManager), + public SpeechRecognitionEventListener { public: // Returns the current SpeechRecognitionManagerImpl or NULL if the call is // issued when it is not created yet or destroyed (by BrowserMainLoop). @@ -61,17 +58,17 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : // SpeechRecognitionManager implementation. virtual int CreateSession( - const content::SpeechRecognitionSessionConfig& config) OVERRIDE; + const SpeechRecognitionSessionConfig& config) OVERRIDE; virtual void StartSession(int session_id) OVERRIDE; virtual void AbortSession(int session_id) OVERRIDE; virtual void AbortAllSessionsForListener( - content::SpeechRecognitionEventListener* listener) OVERRIDE; + SpeechRecognitionEventListener* listener) OVERRIDE; virtual void AbortAllSessionsForRenderView(int render_process_id, int render_view_id) OVERRIDE; virtual void StopAudioCaptureForSession(int session_id) OVERRIDE; - virtual const content::SpeechRecognitionSessionConfig& GetSessionConfig( + virtual const SpeechRecognitionSessionConfig& GetSessionConfig( int session_id) const OVERRIDE; - virtual content::SpeechRecognitionSessionContext GetSessionContext( + virtual SpeechRecognitionSessionContext GetSessionContext( int session_id) const OVERRIDE; virtual int GetSession(int render_process_id, int render_view_id, @@ -90,15 +87,15 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : virtual void OnAudioEnd(int session_id) OVERRIDE; virtual void OnRecognitionEnd(int session_id) OVERRIDE; virtual void OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) OVERRIDE; + int session_id, const SpeechRecognitionResult& result) OVERRIDE; virtual void OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) OVERRIDE; + int session_id, const 
SpeechRecognitionError& error) OVERRIDE; virtual void OnAudioLevelsChange(int session_id, float volume, float noise_volume) OVERRIDE; protected: // BrowserMainLoop is the only one allowed to istantiate and free us. - friend class content::BrowserMainLoop; + friend class BrowserMainLoop; friend class scoped_ptr<SpeechRecognitionManagerImpl>; // Needed for dtor. SpeechRecognitionManagerImpl(); virtual ~SpeechRecognitionManagerImpl(); @@ -127,8 +124,8 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : int id; bool listener_is_active; - content::SpeechRecognitionSessionConfig config; - content::SpeechRecognitionSessionContext context; + SpeechRecognitionSessionConfig config; + SpeechRecognitionSessionContext context; scoped_refptr<SpeechRecognizer> recognizer; }; @@ -159,8 +156,8 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : bool SessionExists(int session_id) const; const Session& GetSession(int session_id) const; - content::SpeechRecognitionEventListener* GetListener(int session_id) const; - content::SpeechRecognitionEventListener* GetDelegateListener() const; + SpeechRecognitionEventListener* GetListener(int session_id) const; + SpeechRecognitionEventListener* GetDelegateListener() const; int GetNextSessionID(); typedef std::map<int, Session> SessionsTable; @@ -168,7 +165,7 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : int primary_session_id_; int last_session_id_; bool is_dispatching_event_; - scoped_ptr<content::SpeechRecognitionManagerDelegate> delegate_; + scoped_ptr<SpeechRecognitionManagerDelegate> delegate_; // Used for posting asynchronous tasks (on the IO thread) without worrying // about this class being destroyed in the meanwhile (due to browser shutdown) @@ -181,6 +178,6 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : #endif // !defined(OS_IOS) }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_MANAGER_IMPL_H_ diff --git a/content/browser/speech/speech_recognizer.cc 
b/content/browser/speech/speech_recognizer.cc index 9eefc5c..481306b 100644 --- a/content/browser/speech/speech_recognizer.cc +++ b/content/browser/speech/speech_recognizer.cc @@ -17,17 +17,12 @@ #include "content/public/common/speech_recognition_result.h" #include "net/url_request/url_request_context_getter.h" -using content::BrowserMainLoop; -using content::BrowserThread; -using content::SpeechRecognitionError; -using content::SpeechRecognitionEventListener; -using content::SpeechRecognitionGrammar; -using content::SpeechRecognitionResult; using media::AudioInputController; using media::AudioManager; using media::AudioParameters; using media::ChannelLayout; +namespace content { namespace { // The following constants are related to the volume level indicator shown in @@ -47,7 +42,7 @@ const float kAudioMeterDbRange = kAudioMeterMaxDb - kAudioMeterMinDb; const float kAudioMeterRangeMaxUnclipped = 47.0f / 48.0f; // Returns true if more than 5% of the samples are at min or max value. -bool DetectClipping(const speech::AudioChunk& chunk) { +bool DetectClipping(const AudioChunk& chunk) { const int num_samples = chunk.NumSamples(); const int16* samples = chunk.SamplesData16(); const int kThreshold = num_samples / 20; @@ -67,8 +62,6 @@ void KeepAudioControllerRefcountedForDtor(scoped_refptr<AudioInputController>) { } // namespace -namespace speech { - const int SpeechRecognizer::kAudioSampleRate = 16000; const ChannelLayout SpeechRecognizer::kChannelLayout = media::CHANNEL_LAYOUT_MONO; @@ -194,7 +187,7 @@ void SpeechRecognizer::OnData(AudioInputController* controller, void SpeechRecognizer::OnAudioClosed(AudioInputController*) {} void SpeechRecognizer::OnSpeechRecognitionEngineResult( - const content::SpeechRecognitionResult& result) { + const SpeechRecognitionResult& result) { FSMEventArgs event_args(EVENT_ENGINE_RESULT); event_args.engine_result = result; BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, @@ -203,7 +196,7 @@ void 
SpeechRecognizer::OnSpeechRecognitionEngineResult( } void SpeechRecognizer::OnSpeechRecognitionEngineError( - const content::SpeechRecognitionError& error) { + const SpeechRecognitionError& error) { FSMEventArgs event_args(EVENT_ENGINE_ERROR); event_args.engine_error = error; BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, @@ -406,15 +399,13 @@ SpeechRecognizer::StartRecording(const FSMEventArgs&) { listener_->OnRecognitionStart(session_id_); if (!audio_manager->HasAudioInputDevices()) { - return Abort(SpeechRecognitionError( - content::SPEECH_RECOGNITION_ERROR_AUDIO, - content::SPEECH_AUDIO_ERROR_DETAILS_NO_MIC)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO, + SPEECH_AUDIO_ERROR_DETAILS_NO_MIC)); } if (audio_manager->IsRecordingInProcess()) { - return Abort(SpeechRecognitionError( - content::SPEECH_RECOGNITION_ERROR_AUDIO, - content::SPEECH_AUDIO_ERROR_DETAILS_IN_USE)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO, + SPEECH_AUDIO_ERROR_DETAILS_IN_USE)); } const int samples_per_packet = (kAudioSampleRate * @@ -425,8 +416,7 @@ SpeechRecognizer::StartRecording(const FSMEventArgs&) { audio_controller_ = AudioInputController::Create(audio_manager, this, params); if (audio_controller_.get() == NULL) { - return Abort( - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_AUDIO)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO)); } // The endpointer needs to estimate the environment/background noise before @@ -471,8 +461,7 @@ SpeechRecognizer::DetectUserSpeechOrTimeout(const FSMEventArgs&) { listener_->OnSoundStart(session_id_); return STATE_RECOGNIZING; } else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) { - return Abort( - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_NO_SPEECH)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH)); } return STATE_WAITING_FOR_SPEECH; } @@ -503,20 +492,17 @@ SpeechRecognizer::FSMState SpeechRecognizer::AbortSilently(const 
FSMEventArgs& event_args) { DCHECK_NE(event_args.event, EVENT_AUDIO_ERROR); DCHECK_NE(event_args.event, EVENT_ENGINE_ERROR); - return Abort( - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_NONE)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NONE)); } SpeechRecognizer::FSMState SpeechRecognizer::AbortWithError(const FSMEventArgs& event_args) { if (event_args.event == EVENT_AUDIO_ERROR) { - return Abort( - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_AUDIO)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO)); } else if (event_args.event == EVENT_ENGINE_ERROR) { return Abort(event_args.engine_error); } - return Abort( - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_ABORTED)); + return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_ABORTED)); } SpeechRecognizer::FSMState SpeechRecognizer::Abort( @@ -538,7 +524,7 @@ SpeechRecognizer::FSMState SpeechRecognizer::Abort( if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT) listener_->OnAudioEnd(session_id_); - if (error.code != content::SPEECH_RECOGNITION_ERROR_NONE) + if (error.code != SPEECH_RECOGNITION_ERROR_NONE) listener_->OnRecognitionError(session_id_, error); listener_->OnRecognitionEnd(session_id_); @@ -660,10 +646,10 @@ SpeechRecognizer::FSMEventArgs::FSMEventArgs(FSMEvent event_value) : event(event_value), audio_error_code(0), audio_data(NULL), - engine_error(content::SPEECH_RECOGNITION_ERROR_NONE) { + engine_error(SPEECH_RECOGNITION_ERROR_NONE) { } SpeechRecognizer::FSMEventArgs::~FSMEventArgs() { } -} // namespace speech +} // namespace content diff --git a/content/browser/speech/speech_recognizer.h b/content/browser/speech/speech_recognizer.h index 9ac813c..7df44fe 100644 --- a/content/browser/speech/speech_recognizer.h +++ b/content/browser/speech/speech_recognizer.h @@ -15,15 +15,14 @@ #include "media/audio/audio_input_controller.h" #include "net/url_request/url_request_context_getter.h" -namespace content { 
-class SpeechRecognitionEventListener; -} - namespace media { class AudioManager; } -namespace speech { +namespace content { + +class SpeechRecognitionEventListener; + // Handles speech recognition for a session (identified by |session_id|), taking // care of audio capture, silence detection/endpointer and interaction with the // SpeechRecognitionEngine. @@ -40,11 +39,10 @@ class CONTENT_EXPORT SpeechRecognizer static void SetAudioManagerForTests(media::AudioManager* audio_manager); - SpeechRecognizer( - content::SpeechRecognitionEventListener* listener, - int session_id, - bool is_single_shot, - SpeechRecognitionEngine* engine); + SpeechRecognizer(SpeechRecognitionEventListener* listener, + int session_id, + bool is_single_shot, + SpeechRecognitionEngine* engine); void StartRecognition(); void AbortRecognition(); @@ -85,8 +83,8 @@ class CONTENT_EXPORT SpeechRecognizer FSMEvent event; int audio_error_code; scoped_refptr<AudioChunk> audio_data; - content::SpeechRecognitionResult engine_result; - content::SpeechRecognitionError engine_error; + SpeechRecognitionResult engine_result; + SpeechRecognitionError engine_error; }; virtual ~SpeechRecognizer(); @@ -111,7 +109,7 @@ class CONTENT_EXPORT SpeechRecognizer FSMState ProcessFinalResult(const FSMEventArgs& event_args); FSMState AbortSilently(const FSMEventArgs& event_args); FSMState AbortWithError(const FSMEventArgs& event_args); - FSMState Abort(const content::SpeechRecognitionError& error); + FSMState Abort(const SpeechRecognitionError& error); FSMState DetectEndOfSpeech(const FSMEventArgs& event_args); FSMState DoNothing(const FSMEventArgs& event_args) const; FSMState NotFeasible(const FSMEventArgs& event_args); @@ -138,13 +136,13 @@ class CONTENT_EXPORT SpeechRecognizer // SpeechRecognitionEngineDelegate methods. 
virtual void OnSpeechRecognitionEngineResult( - const content::SpeechRecognitionResult& result) OVERRIDE; + const SpeechRecognitionResult& result) OVERRIDE; virtual void OnSpeechRecognitionEngineError( - const content::SpeechRecognitionError& error) OVERRIDE; + const SpeechRecognitionError& error) OVERRIDE; static media::AudioManager* audio_manager_for_tests_; - content::SpeechRecognitionEventListener* listener_; + SpeechRecognitionEventListener* listener_; scoped_ptr<SpeechRecognitionEngine> recognition_engine_; Endpointer endpointer_; scoped_refptr<media::AudioInputController> audio_controller_; @@ -158,6 +156,6 @@ class CONTENT_EXPORT SpeechRecognizer DISALLOW_COPY_AND_ASSIGN(SpeechRecognizer); }; -} // namespace speech +} // namespace content #endif // CONTENT_BROWSER_SPEECH_SPEECH_RECOGNIZER_H_ diff --git a/content/browser/speech/speech_recognizer_unittest.cc b/content/browser/speech/speech_recognizer_unittest.cc index bd39c19..2741767 100644 --- a/content/browser/speech/speech_recognizer_unittest.cc +++ b/content/browser/speech/speech_recognizer_unittest.cc @@ -18,8 +18,6 @@ #include "testing/gtest/include/gtest/gtest.h" using base::MessageLoopProxy; -using content::BrowserThread; -using content::BrowserThreadImpl; using media::AudioInputController; using media::AudioInputStream; using media::AudioManager; @@ -28,9 +26,9 @@ using media::AudioParameters; using media::TestAudioInputController; using media::TestAudioInputControllerFactory; -namespace speech { +namespace content { -class SpeechRecognizerTest : public content::SpeechRecognitionEventListener, +class SpeechRecognizerTest : public SpeechRecognitionEventListener, public testing::Test { public: SpeechRecognizerTest() @@ -42,7 +40,7 @@ class SpeechRecognizerTest : public content::SpeechRecognitionEventListener, audio_ended_(false), sound_started_(false), sound_ended_(false), - error_(content::SPEECH_RECOGNITION_ERROR_NONE), + error_(SPEECH_RECOGNITION_ERROR_NONE), volume_(-1.0f) { // SpeechRecognizer 
takes ownership of sr_engine. SpeechRecognitionEngine* sr_engine = @@ -87,7 +85,7 @@ class SpeechRecognizerTest : public content::SpeechRecognitionEventListener, EXPECT_FALSE(sound_started_ ^ sound_ended_); } - // Overridden from content::SpeechRecognitionEventListener: + // Overridden from SpeechRecognitionEventListener: virtual void OnAudioStart(int session_id) OVERRIDE { audio_started_ = true; CheckEventsConsistency(); @@ -99,12 +97,12 @@ class SpeechRecognizerTest : public content::SpeechRecognitionEventListener, } virtual void OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) OVERRIDE { + int session_id, const SpeechRecognitionResult& result) OVERRIDE { result_received_ = true; } virtual void OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) OVERRIDE { + int session_id, const SpeechRecognitionError& error) OVERRIDE { EXPECT_TRUE(recognition_started_); EXPECT_FALSE(recognition_ended_); error_ = error.code; @@ -175,7 +173,7 @@ class SpeechRecognizerTest : public content::SpeechRecognitionEventListener, bool audio_ended_; bool sound_started_; bool sound_ended_; - content::SpeechRecognitionErrorCode error_; + SpeechRecognitionErrorCode error_; net::TestURLFetcherFactory url_fetcher_factory_; TestAudioInputControllerFactory audio_input_controller_factory_; std::vector<uint8> audio_packet_; @@ -191,7 +189,7 @@ TEST_F(SpeechRecognizerTest, StopNoData) { EXPECT_TRUE(recognition_started_); EXPECT_FALSE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); CheckFinalEventsConsistency(); } @@ -204,7 +202,7 @@ TEST_F(SpeechRecognizerTest, CancelNoData) { EXPECT_TRUE(recognition_started_); EXPECT_FALSE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_ABORTED, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_ABORTED, error_); CheckFinalEventsConsistency(); 
} @@ -237,7 +235,7 @@ TEST_F(SpeechRecognizerTest, StopWithData) { EXPECT_TRUE(audio_ended_); EXPECT_FALSE(recognition_ended_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); // Issue the network callback to complete the process. net::TestURLFetcher* fetcher = url_fetcher_factory_.GetFetcherByID(0); @@ -254,7 +252,7 @@ TEST_F(SpeechRecognizerTest, StopWithData) { MessageLoop::current()->RunAllPending(); EXPECT_TRUE(recognition_ended_); EXPECT_TRUE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); CheckFinalEventsConsistency(); } @@ -274,7 +272,7 @@ TEST_F(SpeechRecognizerTest, CancelWithData) { EXPECT_TRUE(recognition_started_); EXPECT_TRUE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_ABORTED, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_ABORTED, error_); CheckFinalEventsConsistency(); } @@ -298,7 +296,7 @@ TEST_F(SpeechRecognizerTest, ConnectionError) { EXPECT_TRUE(audio_ended_); EXPECT_FALSE(recognition_ended_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); // Issue the network callback to complete the process. 
fetcher->set_url(fetcher->GetOriginalURL()); @@ -312,7 +310,7 @@ TEST_F(SpeechRecognizerTest, ConnectionError) { MessageLoop::current()->RunAllPending(); EXPECT_TRUE(recognition_ended_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NETWORK, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NETWORK, error_); CheckFinalEventsConsistency(); } @@ -336,7 +334,7 @@ TEST_F(SpeechRecognizerTest, ServerError) { EXPECT_TRUE(audio_ended_); EXPECT_FALSE(recognition_ended_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); // Issue the network callback to complete the process. fetcher->set_url(fetcher->GetOriginalURL()); @@ -349,7 +347,7 @@ TEST_F(SpeechRecognizerTest, ServerError) { MessageLoop::current()->RunAllPending(); EXPECT_TRUE(recognition_ended_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NETWORK, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NETWORK, error_); CheckFinalEventsConsistency(); } @@ -365,7 +363,7 @@ TEST_F(SpeechRecognizerTest, AudioControllerErrorNoData) { EXPECT_TRUE(recognition_started_); EXPECT_FALSE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_AUDIO, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_AUDIO, error_); CheckFinalEventsConsistency(); } @@ -385,7 +383,7 @@ TEST_F(SpeechRecognizerTest, AudioControllerErrorWithData) { EXPECT_TRUE(recognition_started_); EXPECT_TRUE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_AUDIO, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_AUDIO, error_); CheckFinalEventsConsistency(); } @@ -409,7 +407,7 @@ TEST_F(SpeechRecognizerTest, NoSpeechCallbackIssued) { EXPECT_TRUE(recognition_started_); EXPECT_TRUE(audio_started_); EXPECT_FALSE(result_received_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NO_SPEECH, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NO_SPEECH, 
error_); CheckFinalEventsConsistency(); } @@ -442,7 +440,7 @@ TEST_F(SpeechRecognizerTest, NoSpeechCallbackNotIssued) { } MessageLoop::current()->RunAllPending(); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); EXPECT_TRUE(audio_started_); EXPECT_FALSE(audio_ended_); EXPECT_FALSE(recognition_ended_); @@ -488,7 +486,7 @@ TEST_F(SpeechRecognizerTest, SetInputVolumeCallback) { EXPECT_FLOAT_EQ(0.89926866f, volume_); EXPECT_FLOAT_EQ(0.75071919f, noise_volume_); - EXPECT_EQ(content::SPEECH_RECOGNITION_ERROR_NONE, error_); + EXPECT_EQ(SPEECH_RECOGNITION_ERROR_NONE, error_); EXPECT_FALSE(audio_ended_); EXPECT_FALSE(recognition_ended_); recognizer_->AbortRecognition(); @@ -496,4 +494,4 @@ TEST_F(SpeechRecognizerTest, SetInputVolumeCallback) { CheckFinalEventsConsistency(); } -} // namespace speech +} // namespace content |