summary refs log tree commit diff stats
path: root/content/browser/speech
diff options
context:
space:
mode:
author: rsleevi@chromium.org <rsleevi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> 2013-06-02 21:13:46 +0000
committer: rsleevi@chromium.org <rsleevi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> 2013-06-02 21:13:46 +0000
commit: fc72bb18b111ff63e57135d97de6d59291f3b7b8 (patch)
tree: f7fedf0a0577e38a0486e8bdc88a47a508bf122d /content/browser/speech
parent: 7cd76fded67d66fb8ea4f5abce5241ad71d749a9 (diff)
download: chromium_src-fc72bb18b111ff63e57135d97de6d59291f3b7b8.zip
chromium_src-fc72bb18b111ff63e57135d97de6d59291f3b7b8.tar.gz
chromium_src-fc72bb18b111ff63e57135d97de6d59291f3b7b8.tar.bz2
Update content/ to use scoped_refptr<T>::get() rather than implicit "operator T*"
Linux fixes.

BUG=110610
TBR=darin

Review URL: https://chromiumcodereview.appspot.com/16294003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@203624 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/browser/speech')
-rw-r--r--content/browser/speech/endpointer/endpointer_unittest.cc2
-rw-r--r--content/browser/speech/google_one_shot_remote_engine.cc6
-rw-r--r--content/browser/speech/google_one_shot_remote_engine_unittest.cc2
-rw-r--r--content/browser/speech/google_streaming_remote_engine.cc10
-rw-r--r--content/browser/speech/google_streaming_remote_engine_unittest.cc2
-rw-r--r--content/browser/speech/input_tag_speech_dispatcher_host.cc2
-rw-r--r--content/browser/speech/speech_recognition_dispatcher_host.cc2
-rw-r--r--content/browser/speech/speech_recognition_manager_impl.cc8
-rw-r--r--content/browser/speech/speech_recognizer_impl.cc10
9 files changed, 22 insertions, 22 deletions
diff --git a/content/browser/speech/endpointer/endpointer_unittest.cc b/content/browser/speech/endpointer/endpointer_unittest.cc
index ed5a1fe..306a5ee 100644
--- a/content/browser/speech/endpointer/endpointer_unittest.cc
+++ b/content/browser/speech/endpointer/endpointer_unittest.cc
@@ -123,7 +123,7 @@ class EndpointerFrameProcessor : public FrameProcessor {
int frame_size) OVERRIDE {
scoped_refptr<AudioChunk> frame(
new AudioChunk(reinterpret_cast<uint8*>(samples), kFrameSize * 2, 2));
- endpointer_->ProcessAudio(*frame, NULL);
+ endpointer_->ProcessAudio(*frame.get(), NULL);
int64 ep_time;
return endpointer_->Status(&ep_time);
}
diff --git a/content/browser/speech/google_one_shot_remote_engine.cc b/content/browser/speech/google_one_shot_remote_engine.cc
index 113a939..00a83e4 100644
--- a/content/browser/speech/google_one_shot_remote_engine.cc
+++ b/content/browser/speech/google_one_shot_remote_engine.cc
@@ -164,7 +164,7 @@ void GoogleOneShotRemoteEngine::StartRecognition() {
DCHECK(!url_fetcher_.get());
std::string lang_param = config_.language;
- if (lang_param.empty() && url_context_) {
+ if (lang_param.empty() && url_context_.get()) {
// If no language is provided then we use the first from the accepted
// language list. If this list is empty then it defaults to "en-US".
// Example of the contents of this list: "es,en-GB;q=0.8", ""
@@ -211,7 +211,7 @@ void GoogleOneShotRemoteEngine::StartRecognition() {
net::URLFetcher::POST,
this));
url_fetcher_->SetChunkedUpload(encoder_->mime_type());
- url_fetcher_->SetRequestContext(url_context_);
+ url_fetcher_->SetRequestContext(url_context_.get());
url_fetcher_->SetReferrer(config_.origin_url);
// The speech recognition API does not require user identification as part
@@ -249,7 +249,7 @@ void GoogleOneShotRemoteEngine::AudioChunksEnded() {
new AudioChunk(reinterpret_cast<uint8*>(&samples[0]),
samples.size() * sizeof(int16),
encoder_->bits_per_sample() / 8));
- encoder_->Encode(*dummy_chunk);
+ encoder_->Encode(*dummy_chunk.get());
encoder_->Flush();
scoped_refptr<AudioChunk> encoded_dummy_data(
encoder_->GetEncodedDataAndClear());
diff --git a/content/browser/speech/google_one_shot_remote_engine_unittest.cc b/content/browser/speech/google_one_shot_remote_engine_unittest.cc
index 7f06f86..cd6d418 100644
--- a/content/browser/speech/google_one_shot_remote_engine_unittest.cc
+++ b/content/browser/speech/google_one_shot_remote_engine_unittest.cc
@@ -59,7 +59,7 @@ void GoogleOneShotRemoteEngineTest::CreateAndTestRequest(
2 /* bytes per sample */));
client.set_delegate(this);
client.StartRecognition();
- client.TakeAudioChunk(*dummy_audio_chunk);
+ client.TakeAudioChunk(*dummy_audio_chunk.get());
client.AudioChunksEnded();
net::TestURLFetcher* fetcher = url_fetcher_factory_.GetFetcherByID(0);
ASSERT_TRUE(fetcher);
diff --git a/content/browser/speech/google_streaming_remote_engine.cc b/content/browser/speech/google_streaming_remote_engine.cc
index d86c180..514a949 100644
--- a/content/browser/speech/google_streaming_remote_engine.cc
+++ b/content/browser/speech/google_streaming_remote_engine.cc
@@ -327,7 +327,7 @@ GoogleStreamingRemoteEngine::ConnectBothStreams(const FSMEventArgs&) {
downstream_fetcher_.reset(URLFetcher::Create(
kDownstreamUrlFetcherIdForTests, downstream_url, URLFetcher::GET, this));
- downstream_fetcher_->SetRequestContext(url_context_);
+ downstream_fetcher_->SetRequestContext(url_context_.get());
downstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
net::LOAD_DO_NOT_SEND_COOKIES |
net::LOAD_DO_NOT_SEND_AUTH_DATA);
@@ -367,7 +367,7 @@ GoogleStreamingRemoteEngine::ConnectBothStreams(const FSMEventArgs&) {
upstream_fetcher_.reset(URLFetcher::Create(
kUpstreamUrlFetcherIdForTests, upstream_url, URLFetcher::POST, this));
upstream_fetcher_->SetChunkedUpload(encoder_->mime_type());
- upstream_fetcher_->SetRequestContext(url_context_);
+ upstream_fetcher_->SetRequestContext(url_context_.get());
upstream_fetcher_->SetReferrer(config_.origin_url);
upstream_fetcher_->SetLoadFlags(net::LOAD_DO_NOT_SAVE_COOKIES |
net::LOAD_DO_NOT_SEND_COOKIES |
@@ -382,7 +382,7 @@ GoogleStreamingRemoteEngine::TransmitAudioUpstream(
const FSMEventArgs& event_args) {
DCHECK(upstream_fetcher_.get());
DCHECK(event_args.audio_data.get());
- const AudioChunk& audio = *(event_args.audio_data);
+ const AudioChunk& audio = *(event_args.audio_data.get());
DCHECK_EQ(audio.bytes_per_sample(), config_.audio_num_bits_per_sample / 8);
encoder_->Encode(audio);
@@ -494,7 +494,7 @@ GoogleStreamingRemoteEngine::CloseUpstreamAndWaitForResults(
new AudioChunk(reinterpret_cast<uint8*>(&samples[0]),
samples.size() * sizeof(short),
encoder_->bits_per_sample() / 8);
- encoder_->Encode(*dummy_chunk);
+ encoder_->Encode(*dummy_chunk.get());
encoder_->Flush();
scoped_refptr<AudioChunk> encoded_dummy_data =
encoder_->GetEncodedDataAndClear();
@@ -554,7 +554,7 @@ GoogleStreamingRemoteEngine::NotFeasible(const FSMEventArgs& event_args) {
std::string GoogleStreamingRemoteEngine::GetAcceptedLanguages() const {
std::string langs = config_.language;
- if (langs.empty() && url_context_) {
+ if (langs.empty() && url_context_.get()) {
// If no language is provided then we use the first from the accepted
// language list. If this list is empty then it defaults to "en-US".
// Example of the contents of this list: "es,en-GB;q=0.8", ""
diff --git a/content/browser/speech/google_streaming_remote_engine_unittest.cc b/content/browser/speech/google_streaming_remote_engine_unittest.cc
index cd23d4b..c1fa006 100644
--- a/content/browser/speech/google_streaming_remote_engine_unittest.cc
+++ b/content/browser/speech/google_streaming_remote_engine_unittest.cc
@@ -380,7 +380,7 @@ void GoogleStreamingRemoteEngineTest::InjectDummyAudioChunk() {
sizeof(dummy_audio_buffer_data),
2 /* bytes per sample */));
DCHECK(engine_under_test_.get());
- engine_under_test_->TakeAudioChunk(*dummy_audio_chunk);
+ engine_under_test_->TakeAudioChunk(*dummy_audio_chunk.get());
}
size_t GoogleStreamingRemoteEngineTest::UpstreamChunksUploadedFromLastCall() {
diff --git a/content/browser/speech/input_tag_speech_dispatcher_host.cc b/content/browser/speech/input_tag_speech_dispatcher_host.cc
index b064fbc..8c4acdb 100644
--- a/content/browser/speech/input_tag_speech_dispatcher_host.cc
+++ b/content/browser/speech/input_tag_speech_dispatcher_host.cc
@@ -128,7 +128,7 @@ void InputTagSpeechDispatcherHost::StartRecognitionOnIO(
config.origin_url = params.origin_url;
config.initial_context = context;
config.url_request_context_getter = url_request_context_getter_.get();
- if (recognition_preferences_) {
+ if (recognition_preferences_.get()) {
config.filter_profanities = recognition_preferences_->FilterProfanities();
} else {
config.filter_profanities = false;
diff --git a/content/browser/speech/speech_recognition_dispatcher_host.cc b/content/browser/speech/speech_recognition_dispatcher_host.cc
index 2dc993a..7f20ca4 100644
--- a/content/browser/speech/speech_recognition_dispatcher_host.cc
+++ b/content/browser/speech/speech_recognition_dispatcher_host.cc
@@ -79,7 +79,7 @@ void SpeechRecognitionDispatcherHost::OnStartRequest(
config.origin_url = params.origin_url;
config.initial_context = context;
config.url_request_context_getter = context_getter_.get();
- if (recognition_preferences_) {
+ if (recognition_preferences_.get()) {
config.filter_profanities = recognition_preferences_->FilterProfanities();
} else {
config.filter_profanities = false;
diff --git a/content/browser/speech/speech_recognition_manager_impl.cc b/content/browser/speech/speech_recognition_manager_impl.cc
index beae421..0b77003 100644
--- a/content/browser/speech/speech_recognition_manager_impl.cc
+++ b/content/browser/speech/speech_recognition_manager_impl.cc
@@ -118,10 +118,10 @@ int SpeechRecognitionManagerImpl::CreateSession(
SpeechRecognitionEngine* google_remote_engine;
if (config.is_legacy_api) {
google_remote_engine =
- new GoogleOneShotRemoteEngine(config.url_request_context_getter);
+ new GoogleOneShotRemoteEngine(config.url_request_context_getter.get());
} else {
- google_remote_engine =
- new GoogleStreamingRemoteEngine(config.url_request_context_getter);
+ google_remote_engine = new GoogleStreamingRemoteEngine(
+ config.url_request_context_getter.get());
}
google_remote_engine->SetConfig(remote_engine_config);
@@ -602,7 +602,7 @@ void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
}
void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) {
- DCHECK(session.recognizer == NULL || !session.recognizer->IsActive());
+ DCHECK(session.recognizer.get() == NULL || !session.recognizer->IsActive());
if (primary_session_id_ == session.id)
primary_session_id_ = kSessionIDInvalid;
sessions_.erase(session.id);
diff --git a/content/browser/speech/speech_recognizer_impl.cc b/content/browser/speech/speech_recognizer_impl.cc
index c1789a7..9216478 100644
--- a/content/browser/speech/speech_recognizer_impl.cc
+++ b/content/browser/speech/speech_recognizer_impl.cc
@@ -151,9 +151,9 @@ SpeechRecognizerImpl::recognition_engine() const {
SpeechRecognizerImpl::~SpeechRecognizerImpl() {
endpointer_.EndSession();
- if (audio_controller_) {
- audio_controller_->Close(base::Bind(&KeepAudioControllerRefcountedForDtor,
- audio_controller_));
+ if (audio_controller_.get()) {
+ audio_controller_->Close(
+ base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
}
}
@@ -225,7 +225,7 @@ void SpeechRecognizerImpl::DispatchEvent(const FSMEventArgs& event_args) {
if (event_args.event == EVENT_AUDIO_DATA) {
DCHECK(event_args.audio_data.get() != NULL);
- ProcessAudioPipeline(*event_args.audio_data);
+ ProcessAudioPipeline(*event_args.audio_data.get());
}
// The audio pipeline must be processed before the event dispatch, otherwise
@@ -439,7 +439,7 @@ SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
// This is a little hack, since TakeAudioChunk() is already called by
// ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
// the first audio chunk captured after opening the audio device.
- recognition_engine_->TakeAudioChunk(*(event_args.audio_data));
+ recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
return STATE_ESTIMATING_ENVIRONMENT;
}