author    Glenn Kasten <gkasten@google.com>    2012-03-12 16:29:55 -0700
committer Glenn Kasten <gkasten@google.com>    2012-03-13 11:09:47 -0700
commit    18db49a46259020387c40fff36d92edc1087a366 (patch)
tree      df524d9cb807626d23cc0173be8fd09cd99353c2
parent    065781397099719805a0a42023581b2622984dd4 (diff)
Whitespace and indentation
Fix indentation to be a multiple of 4.
Make it easier to search: use "sp<" not "sp <", and "switch (...)" instead of
"switch(...)" (likewise for "if" and "while").
Remove redundant blank lines at start of file or at EOF.
Remove whitespace at end of line.
Remove extra blank lines where they don't add value.
Use git diff -b or -w to verify.

Change-Id: I966b7ba852faa5474be6907fb212f5e267c2874e
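For illustration only (not taken from this commit), a small hypothetical snippet written to the conventions above might look like the following; the type and function names are invented for the example, and std::shared_ptr stands in for android::sp so the sketch is self-contained:

    #include <memory>

    // Hypothetical stand-in for android::sp<> so the sketch compiles on its own.
    template <typename T>
    using sp = std::shared_ptr<T>;

    struct Recorder {
        int status;
    };

    // Indentation in multiples of 4; "sp<Recorder>" and "switch (" are written
    // without the extra space (the disfavored forms would be "sp <Recorder>" and
    // "switch("), and there is no trailing whitespace or redundant blank line.
    static int describeStatus(const sp<Recorder>& recorder) {
        if (recorder == nullptr) {
            return -1;
        }
        switch (recorder->status) {
        case 0:
            return 0;
        default:
            return 1;
        }
    }

Because the intent is a whitespace-only change, re-running the diff with whitespace ignored (git diff -w, or -b for changes in the amount of whitespace) against the parent commit should come back essentially empty, which is the verification step the message suggests.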
-rw-r--r--  core/jni/android_media_AudioRecord.cpp | 108
-rw-r--r--  core/jni/android_media_AudioSystem.cpp | 2
-rw-r--r--  core/jni/android_media_AudioTrack.cpp | 106
-rw-r--r--  core/jni/android_media_JetPlayer.cpp | 36
-rw-r--r--  include/media/IAudioRecord.h | 6
-rw-r--r--  include/media/IAudioTrack.h | 8
-rw-r--r--  include/media/IMediaMetadataRetriever.h | 1
-rw-r--r--  include/media/IMediaPlayerClient.h | 1
-rw-r--r--  include/media/IMediaRecorderClient.h | 1
-rw-r--r--  include/media/JetPlayer.h | 10
-rw-r--r--  include/media/MediaMetadataRetrieverInterface.h | 3
-rw-r--r--  include/media/MediaProfiles.h | 1
-rw-r--r--  include/media/MemoryLeakTrackUtil.h | 1
-rw-r--r--  include/private/media/VideoFrame.h | 10
-rw-r--r--  media/libmedia/AudioEffect.cpp | 3
-rw-r--r--  media/libmedia/AudioRecord.cpp | 17
-rw-r--r--  media/libmedia/AudioSystem.cpp | 9
-rw-r--r--  media/libmedia/AudioTrack.cpp | 26
-rw-r--r--  media/libmedia/IAudioFlinger.cpp | 18
-rw-r--r--  media/libmedia/IAudioFlingerClient.cpp | 2
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp | 2
-rw-r--r--  media/libmedia/IAudioRecord.cpp | 29
-rw-r--r--  media/libmedia/IAudioTrack.cpp | 30
-rw-r--r--  media/libmedia/IEffect.cpp | 11
-rw-r--r--  media/libmedia/IEffectClient.cpp | 3
-rw-r--r--  media/libmedia/IMediaDeathNotifier.cpp | 8
-rw-r--r--  media/libmedia/IMediaPlayer.cpp | 2
-rw-r--r--  media/libmedia/IMediaPlayerClient.cpp | 2
-rw-r--r--  media/libmedia/IMediaPlayerService.cpp | 2
-rw-r--r--  media/libmedia/IMediaRecorder.cpp | 2
-rw-r--r--  media/libmedia/IMediaRecorderClient.cpp | 2
-rw-r--r--  media/libmedia/JetPlayer.cpp | 51
-rw-r--r--  media/libmedia/MediaProfiles.cpp | 4
-rw-r--r--  media/libmedia/MediaScanner.cpp | 6
-rw-r--r--  media/libmedia/MediaScannerClient.cpp | 1
-rw-r--r--  media/libmedia/Metadata.cpp | 2
-rw-r--r--  media/libmedia/ToneGenerator.cpp | 11
-rw-r--r--  media/libmedia/Visualizer.cpp | 3
-rw-r--r--  media/libmedia/mediametadataretriever.cpp | 2
-rw-r--r--  media/libmedia/mediaplayer.cpp | 14
-rw-r--r--  media/libmedia/mediarecorder.cpp | 34
-rw-r--r--  services/audioflinger/AudioFlinger.cpp | 211
-rw-r--r--  services/audioflinger/AudioFlinger.h | 10
-rw-r--r--  services/audioflinger/AudioPolicyService.cpp | 50
-rw-r--r--  services/audioflinger/AudioPolicyService.h | 4
-rw-r--r--  services/audioflinger/AudioResampler.cpp | 2
-rw-r--r--  services/audioflinger/AudioResampler.h | 2
-rw-r--r--  services/audioflinger/AudioResamplerCubic.h | 2
-rw-r--r--  services/audioflinger/AudioResamplerSinc.cpp | 4
49 files changed, 430 insertions, 445 deletions
diff --git a/core/jni/android_media_AudioRecord.cpp b/core/jni/android_media_AudioRecord.cpp
index b34dbce..7984b9c 100644
--- a/core/jni/android_media_AudioRecord.cpp
+++ b/core/jni/android_media_AudioRecord.cpp
@@ -72,7 +72,7 @@ Mutex sLock;
#define AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED -20
jint android_media_translateRecorderErrorCode(int code) {
- switch(code) {
+ switch (code) {
case NO_ERROR:
return AUDIORECORD_SUCCESS;
case BAD_VALUE:
@@ -81,7 +81,7 @@ jint android_media_translateRecorderErrorCode(int code) {
return AUDIORECORD_ERROR_INVALID_OPERATION;
default:
return AUDIORECORD_ERROR;
- }
+ }
}
@@ -90,14 +90,14 @@ static void recorderCallback(int event, void* user, void *info) {
if (event == AudioRecord::EVENT_MORE_DATA) {
// set size to 0 to signal we're not using the callback to read more data
AudioRecord::Buffer* pBuff = (AudioRecord::Buffer*)info;
- pBuff->size = 0;
-
+ pBuff->size = 0;
+
} else if (event == AudioRecord::EVENT_MARKER) {
audiorecord_callback_cookie *callbackInfo = (audiorecord_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
- callbackInfo->audioRecord_class,
+ callbackInfo->audioRecord_class,
javaAudioRecordFields.postNativeEventInJava,
callbackInfo->audioRecord_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
@@ -111,7 +111,7 @@ static void recorderCallback(int event, void* user, void *info) {
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
- callbackInfo->audioRecord_class,
+ callbackInfo->audioRecord_class,
javaAudioRecordFields.postNativeEventInJava,
callbackInfo->audioRecord_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
@@ -140,7 +140,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
uint32_t nbChannels = popcount(channels);
// compare the format against the Java constants
- if ((audioFormat != javaAudioRecordFields.PCM16)
+ if ((audioFormat != javaAudioRecordFields.PCM16)
&& (audioFormat != javaAudioRecordFields.PCM8)) {
ALOGE("Error creating AudioRecord: unsupported audio format.");
return AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
@@ -156,7 +156,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
}
int frameSize = nbChannels * bytesPerSample;
size_t frameCount = buffSizeInBytes / frameSize;
-
+
if (uint32_t(source) >= AUDIO_SOURCE_CNT) {
ALOGE("Error creating AudioRecord: unknown source.");
return AUDIORECORD_ERROR_SETUP_INVALIDSOURCE;
@@ -181,11 +181,11 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
// create an uninitialized AudioRecord object
lpRecorder = new AudioRecord();
- if(lpRecorder == NULL) {
+ if (lpRecorder == NULL) {
ALOGE("Error creating AudioRecord instance.");
return AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
-
+
// create the callback information:
// this data will be passed with every AudioRecord callback
jclass clazz = env->GetObjectClass(thiz);
@@ -197,7 +197,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioRecord object can be garbage collected.
lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
-
+
lpRecorder->set((audio_source_t) source,
sampleRateInHertz,
format, // word length, PCM
@@ -210,7 +210,7 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
true, // threadCanCallJava)
sessionId);
- if(lpRecorder->initCheck() != NO_ERROR) {
+ if (lpRecorder->initCheck() != NO_ERROR) {
ALOGE("Error creating AudioRecord instance: initialization check failed.");
goto native_init_failure;
}
@@ -225,16 +225,16 @@ android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
- // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
+ // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
// of the Java object
env->SetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj, (int)lpRecorder);
-
+
// save our newly created callback information in the "nativeCallbackCookie" field
// of the Java object (in mNativeCallbackCookie) so we can free the memory in finalize()
env->SetIntField(thiz, javaAudioRecordFields.nativeCallbackCookie, (int)lpCallbackData);
-
+
return AUDIORECORD_SUCCESS;
-
+
// failure:
native_init_failure:
env->DeleteGlobalRef(lpCallbackData->audioRecord_class);
@@ -246,7 +246,7 @@ native_track_failure:
env->SetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj, 0);
env->SetIntField(thiz, javaAudioRecordFields.nativeCallbackCookie, 0);
-
+
return AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
@@ -256,13 +256,13 @@ native_track_failure:
static int
android_media_AudioRecord_start(JNIEnv *env, jobject thiz)
{
- AudioRecord *lpRecorder =
+ AudioRecord *lpRecorder =
(AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
if (lpRecorder == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException", NULL);
return AUDIORECORD_ERROR;
}
-
+
return android_media_translateRecorderErrorCode(lpRecorder->start());
}
@@ -271,7 +271,7 @@ android_media_AudioRecord_start(JNIEnv *env, jobject thiz)
static void
android_media_AudioRecord_stop(JNIEnv *env, jobject thiz)
{
- AudioRecord *lpRecorder =
+ AudioRecord *lpRecorder =
(AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
if (lpRecorder == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException", NULL);
@@ -288,7 +288,7 @@ static void android_media_AudioRecord_release(JNIEnv *env, jobject thiz) {
// serialize access. Ugly, but functional.
Mutex::Autolock lock(&sLock);
- AudioRecord *lpRecorder =
+ AudioRecord *lpRecorder =
(AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
audiorecord_callback_cookie *lpCookie = (audiorecord_callback_cookie *)env->GetIntField(
thiz, javaAudioRecordFields.nativeCallbackCookie);
@@ -304,7 +304,7 @@ static void android_media_AudioRecord_release(JNIEnv *env, jobject thiz) {
lpRecorder->stop();
delete lpRecorder;
}
-
+
// delete the callback information
if (lpCookie) {
ALOGV("deleting lpCookie: %x\n", (int)lpCookie);
@@ -329,7 +329,7 @@ static jint android_media_AudioRecord_readInByteArray(JNIEnv *env, jobject thiz
AudioRecord *lpRecorder = NULL;
// get the audio recorder from which we'll read new audio samples
- lpRecorder =
+ lpRecorder =
(AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
if (lpRecorder == NULL) {
ALOGE("Unable to retrieve AudioRecord object, can't record");
@@ -355,8 +355,8 @@ static jint android_media_AudioRecord_readInByteArray(JNIEnv *env, jobject thiz
// read the new audio data from the native AudioRecord object
ssize_t recorderBuffSize = lpRecorder->frameCount()*lpRecorder->frameSize();
- ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
- sizeInBytes > (jint)recorderBuffSize ?
+ ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
+ sizeInBytes > (jint)recorderBuffSize ?
(jint)recorderBuffSize : sizeInBytes );
env->ReleaseByteArrayElements(javaAudioData, recordBuff, 0);
@@ -381,41 +381,41 @@ static jint android_media_AudioRecord_readInDirectBuffer(JNIEnv *env, jobject t
//ALOGV("Entering android_media_AudioRecord_readInBuffer");
// get the audio recorder from which we'll read new audio samples
- lpRecorder =
+ lpRecorder =
(AudioRecord *)env->GetIntField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
- if(lpRecorder==NULL)
+ if (lpRecorder==NULL)
return 0;
// direct buffer and direct access supported?
long capacity = env->GetDirectBufferCapacity(jBuffer);
- if(capacity == -1) {
+ if (capacity == -1) {
// buffer direct access is not supported
ALOGE("Buffer direct access is not supported, can't record");
return 0;
}
//ALOGV("capacity = %ld", capacity);
jbyte* nativeFromJavaBuf = (jbyte*) env->GetDirectBufferAddress(jBuffer);
- if(nativeFromJavaBuf==NULL) {
+ if (nativeFromJavaBuf==NULL) {
ALOGE("Buffer direct access is not supported, can't record");
return 0;
- }
+ }
// read new data from the recorder
- return (jint) lpRecorder->read(nativeFromJavaBuf,
+ return (jint) lpRecorder->read(nativeFromJavaBuf,
capacity < sizeInBytes ? capacity : sizeInBytes);
}
// ----------------------------------------------------------------------------
-static jint android_media_AudioRecord_set_marker_pos(JNIEnv *env, jobject thiz,
+static jint android_media_AudioRecord_set_marker_pos(JNIEnv *env, jobject thiz,
jint markerPos) {
-
+
AudioRecord *lpRecorder = (AudioRecord *)env->GetIntField(
thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
-
+
if (lpRecorder) {
- return
- android_media_translateRecorderErrorCode( lpRecorder->setMarkerPosition(markerPos) );
+ return
+ android_media_translateRecorderErrorCode( lpRecorder->setMarkerPosition(markerPos) );
} else {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioRecord pointer for setMarkerPosition()");
@@ -426,11 +426,11 @@ static jint android_media_AudioRecord_set_marker_pos(JNIEnv *env, jobject thiz,
// ----------------------------------------------------------------------------
static jint android_media_AudioRecord_get_marker_pos(JNIEnv *env, jobject thiz) {
-
+
AudioRecord *lpRecorder = (AudioRecord *)env->GetIntField(
thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
uint32_t markerPos = 0;
-
+
if (lpRecorder) {
lpRecorder->getMarkerPosition(&markerPos);
return (jint)markerPos;
@@ -445,28 +445,28 @@ static jint android_media_AudioRecord_get_marker_pos(JNIEnv *env, jobject thiz)
// ----------------------------------------------------------------------------
static jint android_media_AudioRecord_set_pos_update_period(JNIEnv *env, jobject thiz,
jint period) {
-
+
AudioRecord *lpRecorder = (AudioRecord *)env->GetIntField(
thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
-
+
if (lpRecorder) {
- return
- android_media_translateRecorderErrorCode( lpRecorder->setPositionUpdatePeriod(period) );
+ return
+ android_media_translateRecorderErrorCode( lpRecorder->setPositionUpdatePeriod(period) );
} else {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioRecord pointer for setPositionUpdatePeriod()");
return AUDIORECORD_ERROR;
- }
+ }
}
// ----------------------------------------------------------------------------
static jint android_media_AudioRecord_get_pos_update_period(JNIEnv *env, jobject thiz) {
-
+
AudioRecord *lpRecorder = (AudioRecord *)env->GetIntField(
thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
uint32_t period = 0;
-
+
if (lpRecorder) {
lpRecorder->getPositionUpdatePeriod(&period);
return (jint)period;
@@ -514,7 +514,7 @@ static JNINativeMethod gMethods[] = {
(void *)android_media_AudioRecord_setup},
{"native_finalize", "()V", (void *)android_media_AudioRecord_finalize},
{"native_release", "()V", (void *)android_media_AudioRecord_release},
- {"native_read_in_byte_array",
+ {"native_read_in_byte_array",
"([BII)I", (void *)android_media_AudioRecord_readInByteArray},
{"native_read_in_short_array",
"([SII)I", (void *)android_media_AudioRecord_readInShortArray},
@@ -541,7 +541,7 @@ static JNINativeMethod gMethods[] = {
// ----------------------------------------------------------------------------
-extern bool android_media_getIntConstantFromClass(JNIEnv* pEnv,
+extern bool android_media_getIntConstantFromClass(JNIEnv* pEnv,
jclass theClass, const char* className, const char* constName, int* constVal);
// ----------------------------------------------------------------------------
@@ -550,7 +550,7 @@ int register_android_media_AudioRecord(JNIEnv *env)
javaAudioRecordFields.postNativeEventInJava = NULL;
javaAudioRecordFields.nativeRecorderInJavaObj = NULL;
javaAudioRecordFields.nativeCallbackCookie = NULL;
-
+
// Get the AudioRecord class
jclass audioRecordClass = env->FindClass(kClassPathName);
@@ -569,7 +569,7 @@ int register_android_media_AudioRecord(JNIEnv *env)
// Get the variables
// mNativeRecorderInJavaObj
- javaAudioRecordFields.nativeRecorderInJavaObj =
+ javaAudioRecordFields.nativeRecorderInJavaObj =
env->GetFieldID(audioRecordClass,
JAVA_NATIVERECORDERINJAVAOBJ_FIELD_NAME, "I");
if (javaAudioRecordFields.nativeRecorderInJavaObj == NULL) {
@@ -592,13 +592,13 @@ int register_android_media_AudioRecord(JNIEnv *env)
ALOGE("Can't find %s", JAVA_AUDIOFORMAT_CLASS_NAME);
return -1;
}
- if ( !android_media_getIntConstantFromClass(env, audioFormatClass,
- JAVA_AUDIOFORMAT_CLASS_NAME,
+ if ( !android_media_getIntConstantFromClass(env, audioFormatClass,
+ JAVA_AUDIOFORMAT_CLASS_NAME,
JAVA_CONST_PCM16_NAME, &(javaAudioRecordFields.PCM16))
- || !android_media_getIntConstantFromClass(env, audioFormatClass,
- JAVA_AUDIOFORMAT_CLASS_NAME,
+ || !android_media_getIntConstantFromClass(env, audioFormatClass,
+ JAVA_AUDIOFORMAT_CLASS_NAME,
JAVA_CONST_PCM8_NAME, &(javaAudioRecordFields.PCM8)) ) {
- // error log performed in getIntConstantFromClass()
+ // error log performed in getIntConstantFromClass()
return -1;
}
diff --git a/core/jni/android_media_AudioSystem.cpp b/core/jni/android_media_AudioSystem.cpp
index ee5eb7e..bff5994 100644
--- a/core/jni/android_media_AudioSystem.cpp
+++ b/core/jni/android_media_AudioSystem.cpp
@@ -268,7 +268,7 @@ static JNINativeMethod gMethods[] = {
int register_android_media_AudioSystem(JNIEnv *env)
{
AudioSystem::setErrorCallback(android_media_AudioSystem_error_callback);
-
+
return AndroidRuntime::registerNativeMethods(env,
kClassPathName, gMethods, NELEM(gMethods));
}
diff --git a/core/jni/android_media_AudioTrack.cpp b/core/jni/android_media_AudioTrack.cpp
index 57f5d3d..d4ed06c 100644
--- a/core/jni/android_media_AudioTrack.cpp
+++ b/core/jni/android_media_AudioTrack.cpp
@@ -105,7 +105,7 @@ class AudioTrackJniStorage {
jint android_media_translateErrorCode(int code) {
- switch(code) {
+ switch (code) {
case NO_ERROR:
return AUDIOTRACK_SUCCESS;
case BAD_VALUE:
@@ -114,7 +114,7 @@ jint android_media_translateErrorCode(int code) {
return AUDIOTRACK_ERROR_INVALID_OPERATION;
default:
return AUDIOTRACK_ERROR;
- }
+ }
}
@@ -123,14 +123,14 @@ static void audioCallback(int event, void* user, void *info) {
if (event == AudioTrack::EVENT_MORE_DATA) {
// set size to 0 to signal we're not using the callback to write more data
AudioTrack::Buffer* pBuff = (AudioTrack::Buffer*)info;
- pBuff->size = 0;
-
+ pBuff->size = 0;
+
} else if (event == AudioTrack::EVENT_MARKER) {
audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
- callbackInfo->audioTrack_class,
+ callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
@@ -144,7 +144,7 @@ static void audioCallback(int event, void* user, void *info) {
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
- callbackInfo->audioTrack_class,
+ callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
@@ -186,7 +186,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
}
int nbChannels = popcount(nativeChannelMask);
-
+
// check the stream type
audio_stream_type_t atStreamType;
switch (streamType) {
@@ -215,7 +215,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
// for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
// so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
// in android_media_AudioTrack_native_write_byte()
- if ((audioFormat == javaAudioTrackFields.PCM8)
+ if ((audioFormat == javaAudioTrackFields.PCM8)
&& (memoryMode == javaAudioTrackFields.MODE_STATIC)) {
ALOGV("android_media_AudioTrack_native_setup(): requesting MODE_STATIC for 8bit \
buff size of %dbytes, switching to 16bit, buff size of %dbytes",
@@ -230,9 +230,9 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
audio_format_t format = audioFormat == javaAudioTrackFields.PCM16 ?
AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT;
int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
-
+
AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
-
+
// initialize the callback information:
// this data will be passed with every AudioTrack callback
jclass clazz = env->GetObjectClass(thiz);
@@ -244,7 +244,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioTrack object can be garbage collected.
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
-
+
lpJniStorage->mStreamType = atStreamType;
if (jSession == NULL) {
@@ -269,7 +269,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
ALOGE("Error creating uninitialized AudioTrack");
goto native_track_failure;
}
-
+
// initialize the native AudioTrack object
if (memoryMode == javaAudioTrackFields.MODE_STREAM) {
@@ -285,15 +285,15 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
0,// shared mem
true,// thread can call Java
sessionId);// audio session ID
-
+
} else if (memoryMode == javaAudioTrackFields.MODE_STATIC) {
// AudioTrack is using shared memory
-
+
if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
goto native_init_failure;
}
-
+
lpTrack->set(
atStreamType,// stream type
sampleRateInHertz,
@@ -302,7 +302,7 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
frameCount,
AUDIO_POLICY_OUTPUT_FLAG_NONE,
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
- 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
+ 0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
lpJniStorage->mMemBase,// shared mem
true,// thread can call Java
sessionId);// audio session ID
@@ -323,21 +323,21 @@ android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_th
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
- // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
+ // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
// of the Java object (in mNativeTrackInJavaObj)
env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, (int)lpTrack);
-
+
// save the JNI resources so we can free them later
//ALOGV("storing lpJniStorage: %x\n", (int)lpJniStorage);
env->SetIntField(thiz, javaAudioTrackFields.jniData, (int)lpJniStorage);
return AUDIOTRACK_SUCCESS;
-
+
// failures:
native_init_failure:
delete lpTrack;
env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, 0);
-
+
native_track_failure:
if (nSession != NULL) {
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
@@ -347,7 +347,7 @@ native_track_failure:
delete lpJniStorage;
env->SetIntField(thiz, javaAudioTrackFields.jniData, 0);
return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
-
+
}
@@ -432,7 +432,7 @@ android_media_AudioTrack_set_volume(JNIEnv *env, jobject thiz, jfloat leftVol, j
// ----------------------------------------------------------------------------
static void android_media_AudioTrack_native_finalize(JNIEnv *env, jobject thiz) {
//ALOGV("android_media_AudioTrack_native_finalize jobject: %x\n", (int)thiz);
-
+
// delete the AudioTrack object
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
@@ -441,7 +441,7 @@ static void android_media_AudioTrack_native_finalize(JNIEnv *env, jobject thiz)
lpTrack->stop();
delete lpTrack;
}
-
+
// delete the JNI data
AudioTrackJniStorage* pJniStorage = (AudioTrackJniStorage *)env->GetIntField(
thiz, javaAudioTrackFields.jniData);
@@ -456,7 +456,7 @@ static void android_media_AudioTrack_native_finalize(JNIEnv *env, jobject thiz)
// ----------------------------------------------------------------------------
static void android_media_AudioTrack_native_release(JNIEnv *env, jobject thiz) {
-
+
// do everything a call to finalize would
android_media_AudioTrack_native_finalize(env, thiz);
// + reset the native resources in the Java object so any attempt to access
@@ -493,7 +493,7 @@ jint writeToTrack(AudioTrack* pTrack, jint audioFormat, jbyte* data,
int count = sizeInBytes;
int16_t *dst = (int16_t *)pTrack->sharedBuffer()->pointer();
const int8_t *src = (const int8_t *)(data + offsetInBytes);
- while(count--) {
+ while (count--) {
*dst++ = (int16_t)(*src++^0x80) << 8;
}
// even though we wrote 2*sizeInBytes, we only report sizeInBytes as written to hide
@@ -514,7 +514,7 @@ static jint android_media_AudioTrack_native_write_byte(JNIEnv *env, jobject thi
AudioTrack *lpTrack = NULL;
//ALOGV("android_media_AudioTrack_native_write_byte(offset=%d, sizeInBytes=%d) called",
// offsetInBytes, sizeInBytes);
-
+
// get the audio track to load with samples
lpTrack = (AudioTrack *)env->GetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
if (lpTrack == NULL) {
@@ -599,7 +599,7 @@ static jint android_media_AudioTrack_get_playback_rate(JNIEnv *env, jobject thi
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
if (lpTrack) {
- return (jint) lpTrack->getSampleRate();
+ return (jint) lpTrack->getSampleRate();
} else {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for getSampleRate()");
@@ -609,14 +609,14 @@ static jint android_media_AudioTrack_get_playback_rate(JNIEnv *env, jobject thi
// ----------------------------------------------------------------------------
-static jint android_media_AudioTrack_set_marker_pos(JNIEnv *env, jobject thiz,
+static jint android_media_AudioTrack_set_marker_pos(JNIEnv *env, jobject thiz,
jint markerPos) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
-
+
if (lpTrack) {
- return android_media_translateErrorCode( lpTrack->setMarkerPosition(markerPos) );
+ return android_media_translateErrorCode( lpTrack->setMarkerPosition(markerPos) );
} else {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for setMarkerPosition()");
@@ -627,11 +627,11 @@ static jint android_media_AudioTrack_set_marker_pos(JNIEnv *env, jobject thiz,
// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_get_marker_pos(JNIEnv *env, jobject thiz) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
uint32_t markerPos = 0;
-
+
if (lpTrack) {
lpTrack->getMarkerPosition(&markerPos);
return (jint)markerPos;
@@ -646,27 +646,27 @@ static jint android_media_AudioTrack_get_marker_pos(JNIEnv *env, jobject thiz)
// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_set_pos_update_period(JNIEnv *env, jobject thiz,
jint period) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
-
+
if (lpTrack) {
- return android_media_translateErrorCode( lpTrack->setPositionUpdatePeriod(period) );
+ return android_media_translateErrorCode( lpTrack->setPositionUpdatePeriod(period) );
} else {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for setPositionUpdatePeriod()");
return AUDIOTRACK_ERROR;
- }
+ }
}
// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_get_pos_update_period(JNIEnv *env, jobject thiz) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
uint32_t period = 0;
-
+
if (lpTrack) {
lpTrack->getPositionUpdatePeriod(&period);
return (jint)period;
@@ -679,12 +679,12 @@ static jint android_media_AudioTrack_get_pos_update_period(JNIEnv *env, jobject
// ----------------------------------------------------------------------------
-static jint android_media_AudioTrack_set_position(JNIEnv *env, jobject thiz,
+static jint android_media_AudioTrack_set_position(JNIEnv *env, jobject thiz,
jint position) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
-
+
if (lpTrack) {
return android_media_translateErrorCode( lpTrack->setPosition(position) );
} else {
@@ -697,11 +697,11 @@ static jint android_media_AudioTrack_set_position(JNIEnv *env, jobject thiz,
// ----------------------------------------------------------------------------
static jint android_media_AudioTrack_get_position(JNIEnv *env, jobject thiz) {
-
+
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
uint32_t position = 0;
-
+
if (lpTrack) {
lpTrack->getPosition(&position);
return (jint)position;
@@ -944,12 +944,12 @@ int register_android_media_AudioTrack(JNIEnv *env)
// Get the memory mode constants
if ( !android_media_getIntConstantFromClass(env, audioTrackClass,
- kClassPathName,
+ kClassPathName,
JAVA_CONST_MODE_STATIC_NAME, &(javaAudioTrackFields.MODE_STATIC))
|| !android_media_getIntConstantFromClass(env, audioTrackClass,
- kClassPathName,
+ kClassPathName,
JAVA_CONST_MODE_STREAM_NAME, &(javaAudioTrackFields.MODE_STREAM)) ) {
- // error log performed in android_media_getIntConstantFromClass()
+ // error log performed in android_media_getIntConstantFromClass()
return -1;
}
@@ -960,16 +960,16 @@ int register_android_media_AudioTrack(JNIEnv *env)
ALOGE("Can't find %s", JAVA_AUDIOFORMAT_CLASS_NAME);
return -1;
}
- if ( !android_media_getIntConstantFromClass(env, audioFormatClass,
- JAVA_AUDIOFORMAT_CLASS_NAME,
+ if ( !android_media_getIntConstantFromClass(env, audioFormatClass,
+ JAVA_AUDIOFORMAT_CLASS_NAME,
JAVA_CONST_PCM16_NAME, &(javaAudioTrackFields.PCM16))
- || !android_media_getIntConstantFromClass(env, audioFormatClass,
- JAVA_AUDIOFORMAT_CLASS_NAME,
+ || !android_media_getIntConstantFromClass(env, audioFormatClass,
+ JAVA_AUDIOFORMAT_CLASS_NAME,
JAVA_CONST_PCM8_NAME, &(javaAudioTrackFields.PCM8)) ) {
- // error log performed in android_media_getIntConstantFromClass()
+ // error log performed in android_media_getIntConstantFromClass()
return -1;
}
-
+
return AndroidRuntime::registerNativeMethods(env, kClassPathName, gMethods, NELEM(gMethods));
}
diff --git a/core/jni/android_media_JetPlayer.cpp b/core/jni/android_media_JetPlayer.cpp
index 9f9bedb..6fedc6b 100644
--- a/core/jni/android_media_JetPlayer.cpp
+++ b/core/jni/android_media_JetPlayer.cpp
@@ -56,7 +56,7 @@ static void
jetPlayerEventCallback(int what, int arg1=0, int arg2=0, void* javaTarget = NULL)
{
JNIEnv *env = AndroidRuntime::getJNIEnv();
- if(env) {
+ if (env) {
env->CallStaticVoidMethod(
javaJetPlayerFields.jetClass, javaJetPlayerFields.postNativeEventInJava,
javaTarget,
@@ -84,7 +84,7 @@ android_media_JetPlayer_setup(JNIEnv *env, jobject thiz, jobject weak_this,
EAS_RESULT result = lpJet->init();
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
// save our newly created C++ JetPlayer in the "nativePlayerInJavaObj" field
// of the Java object (in mNativePlayerInJavaObj)
env->SetIntField(thiz, javaJetPlayerFields.nativePlayerInJavaObj, (int)lpJet);
@@ -105,7 +105,7 @@ android_media_JetPlayer_finalize(JNIEnv *env, jobject thiz)
ALOGV("android_media_JetPlayer_finalize(): entering.");
JetPlayer *lpJet = (JetPlayer *)env->GetIntField(
thiz, javaJetPlayerFields.nativePlayerInJavaObj);
- if(lpJet != NULL) {
+ if (lpJet != NULL) {
lpJet->release();
delete lpJet;
}
@@ -148,7 +148,7 @@ android_media_JetPlayer_loadFromFile(JNIEnv *env, jobject thiz, jstring path)
EAS_RESULT result = lpJet->loadFromFile(pathStr);
env->ReleaseStringUTFChars(path, pathStr);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_openFile(): file successfully opened");
return JNI_TRUE;
} else {
@@ -178,7 +178,7 @@ android_media_JetPlayer_loadFromFileD(JNIEnv *env, jobject thiz,
EAS_RESULT result = lpJet->loadFromFD(jniGetFDFromFileDescriptor(env, fileDescriptor),
(long long)offset, (long long)length); // cast params to types used by EAS_FILE
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
ALOGV("android_media_JetPlayer_openFileDescr(): file successfully opened");
return JNI_TRUE;
} else {
@@ -200,7 +200,7 @@ android_media_JetPlayer_closeFile(JNIEnv *env, jobject thiz)
"Unable to retrieve JetPlayer pointer for closeFile()");
}
- if( lpJet->closeFile()==EAS_SUCCESS) {
+ if (lpJet->closeFile()==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_closeFile(): file successfully closed");
return JNI_TRUE;
} else {
@@ -222,7 +222,7 @@ android_media_JetPlayer_play(JNIEnv *env, jobject thiz)
}
EAS_RESULT result = lpJet->play();
- if( result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_play(): play successful");
return JNI_TRUE;
} else {
@@ -245,11 +245,11 @@ android_media_JetPlayer_pause(JNIEnv *env, jobject thiz)
}
EAS_RESULT result = lpJet->pause();
- if( result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_pause(): pause successful");
return JNI_TRUE;
} else {
- if(result==EAS_ERROR_QUEUE_IS_EMPTY) {
+ if (result==EAS_ERROR_QUEUE_IS_EMPTY) {
ALOGV("android_media_JetPlayer_pause(): paused with an empty queue");
return JNI_TRUE;
} else
@@ -275,7 +275,7 @@ android_media_JetPlayer_queueSegment(JNIEnv *env, jobject thiz,
EAS_RESULT result
= lpJet->queueSegment(segmentNum, libNum, repeatCount, transpose, muteFlags, userID);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_queueSegment(): segment successfully queued");
return JNI_TRUE;
} else {
@@ -311,7 +311,7 @@ android_media_JetPlayer_queueSegmentMuteArray(JNIEnv *env, jobject thiz,
EAS_U32 muteMask=0;
int maxTracks = lpJet->getMaxTracks();
for (jint trackIndex=0; trackIndex<maxTracks; trackIndex++) {
- if(muteTracks[maxTracks-1-trackIndex]==JNI_TRUE)
+ if (muteTracks[maxTracks-1-trackIndex]==JNI_TRUE)
muteMask = (muteMask << 1) | 0x00000001;
else
muteMask = muteMask << 1;
@@ -321,7 +321,7 @@ android_media_JetPlayer_queueSegmentMuteArray(JNIEnv *env, jobject thiz,
result = lpJet->queueSegment(segmentNum, libNum, repeatCount, transpose, muteMask, userID);
env->ReleaseBooleanArrayElements(muteArray, muteTracks, 0);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_queueSegmentMuteArray(): segment successfully queued");
return JNI_TRUE;
} else {
@@ -346,7 +346,7 @@ android_media_JetPlayer_setMuteFlags(JNIEnv *env, jobject thiz,
EAS_RESULT result;
result = lpJet->setMuteFlags(muteFlags, bSync==JNI_TRUE ? true : false);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_setMuteFlags(): mute flags successfully updated");
return JNI_TRUE;
} else {
@@ -380,7 +380,7 @@ android_media_JetPlayer_setMuteArray(JNIEnv *env, jobject thiz,
EAS_U32 muteMask=0;
int maxTracks = lpJet->getMaxTracks();
for (jint trackIndex=0; trackIndex<maxTracks; trackIndex++) {
- if(muteTracks[maxTracks-1-trackIndex]==JNI_TRUE)
+ if (muteTracks[maxTracks-1-trackIndex]==JNI_TRUE)
muteMask = (muteMask << 1) | 0x00000001;
else
muteMask = muteMask << 1;
@@ -390,7 +390,7 @@ android_media_JetPlayer_setMuteArray(JNIEnv *env, jobject thiz,
result = lpJet->setMuteFlags(muteMask, bSync==JNI_TRUE ? true : false);
env->ReleaseBooleanArrayElements(muteArray, muteTracks, 0);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_setMuteArray(): mute flags successfully updated");
return JNI_TRUE;
} else {
@@ -416,7 +416,7 @@ android_media_JetPlayer_setMuteFlag(JNIEnv *env, jobject thiz,
EAS_RESULT result;
result = lpJet->setMuteFlag(trackId,
muteFlag==JNI_TRUE ? true : false, bSync==JNI_TRUE ? true : false);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_setMuteFlag(): mute flag successfully updated for track %d", trackId);
return JNI_TRUE;
} else {
@@ -440,7 +440,7 @@ android_media_JetPlayer_triggerClip(JNIEnv *env, jobject thiz, jint clipId)
EAS_RESULT result;
result = lpJet->triggerClip(clipId);
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_triggerClip(): triggerClip successful for clip %d", clipId);
return JNI_TRUE;
} else {
@@ -463,7 +463,7 @@ android_media_JetPlayer_clearQueue(JNIEnv *env, jobject thiz)
}
EAS_RESULT result = lpJet->clearQueue();
- if(result==EAS_SUCCESS) {
+ if (result==EAS_SUCCESS) {
//ALOGV("android_media_JetPlayer_clearQueue(): clearQueue successful");
return JNI_TRUE;
} else {
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 7869020..089be3b 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -32,7 +32,7 @@ namespace android {
class IAudioRecord : public IInterface
{
-public:
+public:
DECLARE_META_INTERFACE(AudioRecord);
/* After it's created the track is not active. Call start() to
@@ -42,13 +42,13 @@ public:
virtual status_t start(pid_t tid) = 0;
/* Stop a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
+ * obtainBuffer will return an error. Buffers that are already released
* will be processed, unless flush() is called.
*/
virtual void stop() = 0;
/* get this tracks control block */
- virtual sp<IMemory> getCblk() const = 0;
+ virtual sp<IMemory> getCblk() const = 0;
};
// ----------------------------------------------------------------------------
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 77f3e21..577b095 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -32,7 +32,7 @@ namespace android {
class IAudioTrack : public IInterface
{
-public:
+public:
DECLARE_META_INTERFACE(AudioTrack);
/* Get this track's control block */
@@ -45,7 +45,7 @@ public:
virtual status_t start(pid_t tid) = 0;
/* Stop a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
+ * obtainBuffer will return an error. Buffers that are already released
* will continue to be processed, unless/until flush() is called.
*/
virtual void stop() = 0;
@@ -59,9 +59,9 @@ public:
* While muted, the callback, if set, is still called.
*/
virtual void mute(bool) = 0;
-
+
/* Pause a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
+ * obtainBuffer will return an error. Buffers that are already released
* will continue to be processed, unless/until flush() is called.
*/
virtual void pause() = 0;
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 1c1c268..6dbb2d7 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -56,4 +56,3 @@ public:
}; // namespace android
#endif // ANDROID_IMEDIAMETADATARETRIEVER_H
-
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
index daec1c7..8f1843e 100644
--- a/include/media/IMediaPlayerClient.h
+++ b/include/media/IMediaPlayerClient.h
@@ -45,4 +45,3 @@ public:
}; // namespace android
#endif // ANDROID_IMEDIAPLAYERCLIENT_H
-
diff --git a/include/media/IMediaRecorderClient.h b/include/media/IMediaRecorderClient.h
index 0058ef2..e7d0229 100644
--- a/include/media/IMediaRecorderClient.h
+++ b/include/media/IMediaRecorderClient.h
@@ -45,4 +45,3 @@ public:
}; // namespace android
#endif // ANDROID_IMEDIARECORDERCLIENT_H
-
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index 491a950..9f6ff4c 100644
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -40,13 +40,13 @@ public:
static const int JET_NUMQUEUEDSEGMENT_UPDATE = 3;
static const int JET_PAUSE_UPDATE = 4;
- JetPlayer(jobject javaJetPlayer,
- int maxTracks = 32,
+ JetPlayer(jobject javaJetPlayer,
+ int maxTracks = 32,
int trackBufferSize = 1200);
~JetPlayer();
int init();
int release();
-
+
int loadFromFile(const char* url);
int loadFromFD(const int fd, const long long offset, const long long length);
int closeFile();
@@ -60,7 +60,7 @@ public:
int clearQueue();
void setEventCallback(jetevent_callback callback);
-
+
int getMaxTracks() { return mMaxTracks; };
@@ -88,7 +88,7 @@ private:
int mMaxTracks; // max number of MIDI tracks, usually 32
EAS_DATA_HANDLE mEasData;
EAS_FILE_LOCATOR mEasJetFileLoc;
- EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer,
+ EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer,
AudioTrack* mAudioTrack; // and we play it in this audio track
int mTrackBufferSize;
S_JET_STATUS mJetStatus;
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index 27b7e4d..ecc3b65 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -1,6 +1,6 @@
/*
**
-** Copyright (C) 2008 The Android Open Source Project
+** Copyright (C) 2008 The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -56,4 +56,3 @@ public:
}; // namespace android
#endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
-
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 250f267..9fc962c 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -516,4 +516,3 @@ private:
}; // namespace android
#endif // ANDROID_MEDIAPROFILES_H
-
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
index ac0f6b2..d2618aa 100644
--- a/include/media/MemoryLeakTrackUtil.h
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -1,4 +1,3 @@
-
/*
* Copyright 2011, The Android Open Source Project
*
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 3aff0c6..0ecc348 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -26,7 +26,7 @@
namespace android {
// A simple buffer to hold binary data
-class MediaAlbumArt
+class MediaAlbumArt
{
public:
MediaAlbumArt(): mSize(0), mData(0) {}
@@ -57,9 +57,9 @@ public:
fclose(in);
}
- MediaAlbumArt(const MediaAlbumArt& copy) {
- mSize = copy.mSize;
- mData = NULL; // initialize it first
+ MediaAlbumArt(const MediaAlbumArt& copy) {
+ mSize = copy.mSize;
+ mData = NULL; // initialize it first
if (mSize > 0 && copy.mData != NULL) {
mData = new uint8_t[copy.mSize];
if (mData != NULL) {
@@ -89,7 +89,7 @@ class VideoFrame
{
public:
VideoFrame(): mWidth(0), mHeight(0), mDisplayWidth(0), mDisplayHeight(0), mSize(0), mData(0) {}
-
+
VideoFrame(const VideoFrame& copy) {
mWidth = copy.mWidth;
mHeight = copy.mHeight;
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 6808aa2..34451ca 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -174,7 +174,7 @@ AudioEffect::~AudioEffect()
mIEffect->disconnect();
mIEffect->asBinder()->unlinkToDeath(mIEffectClient);
}
- IPCThreadState::self()->flushCommands();
+ IPCThreadState::self()->flushCommands();
}
mIEffect.clear();
mIEffectClient.clear();
@@ -480,4 +480,3 @@ status_t AudioEffect::guidToString(const effect_uuid_t *guid, char *str, size_t
}; // namespace android
-
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 22c3a18..05ade75 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -293,13 +293,13 @@ status_t AudioRecord::start()
return WOULD_BLOCK;
}
}
- }
+ }
AutoMutex lock(mLock);
// acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
// while we are accessing the cblk
- sp <IAudioRecord> audioRecord = mAudioRecord;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioRecord> audioRecord = mAudioRecord;
+ sp<IMemory> iMem = mCblkMemory;
audio_track_cblk_t* cblk = mCblk;
if (mActive == 0) {
mActive = 1;
@@ -638,8 +638,8 @@ ssize_t AudioRecord::read(void* buffer, size_t userSize)
mLock.lock();
// acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
// while we are accessing the cblk
- sp <IAudioRecord> audioRecord = mAudioRecord;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioRecord> audioRecord = mAudioRecord;
+ sp<IMemory> iMem = mCblkMemory;
mLock.unlock();
do {
@@ -684,8 +684,8 @@ bool AudioRecord::processAudioBuffer(const sp<ClientRecordThread>& thread)
mLock.lock();
// acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
// while we are accessing the cblk
- sp <IAudioRecord> audioRecord = mAudioRecord;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioRecord> audioRecord = mAudioRecord;
+ sp<IMemory> iMem = mCblkMemory;
audio_track_cblk_t* cblk = mCblk;
mLock.unlock();
@@ -806,7 +806,7 @@ status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& cblk)
}
}
ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
- result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+ result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
if (result == NO_ERROR) {
// from now on we switch to the newly created cblk
@@ -843,4 +843,3 @@ status_t AudioRecord::ClientRecordThread::readyToRun()
// -------------------------------------------------------------------------
}; // namespace android
-
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index a1cbf0f..33c7d03 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -59,14 +59,14 @@ const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
break;
ALOGW("AudioFlinger not published, waiting...");
usleep(500000); // 0.5 s
- } while(true);
+ } while (true);
if (gAudioFlingerClient == NULL) {
gAudioFlingerClient = new AudioFlingerClient();
} else {
if (gAudioErrorCallback) {
gAudioErrorCallback(NO_ERROR);
}
- }
+ }
binder->linkToDeath(gAudioFlingerClient);
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
gAudioFlinger->registerClient(gAudioFlingerClient);
@@ -482,7 +482,7 @@ void AudioSystem::setErrorCallback(audio_error_callback cb) {
}
bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType) {
- switch(streamType) {
+ switch (streamType) {
case AUDIO_STREAM_MUSIC:
case AUDIO_STREAM_VOICE_CALL:
case AUDIO_STREAM_BLUETOOTH_SCO:
@@ -512,7 +512,7 @@ const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
break;
ALOGW("AudioPolicyService not published, waiting...");
usleep(500000); // 0.5 s
- } while(true);
+ } while (true);
if (gAudioPolicyServiceClient == NULL) {
gAudioPolicyServiceClient = new AudioPolicyServiceClient();
}
@@ -768,4 +768,3 @@ void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
}
}; // namespace android
-
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 34563ca..048be1d 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -72,7 +72,7 @@ status_t AudioTrack::getMinFrameCount(
if (minBufCount < 2) minBufCount = 2;
*frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
- afFrameCount * minBufCount * sampleRate / afSampleRate;
+ afFrameCount * minBufCount * sampleRate / afSampleRate;
return NO_ERROR;
}
@@ -352,13 +352,13 @@ void AudioTrack::start()
return;
}
}
- }
+ }
AutoMutex lock(mLock);
// acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
// while we are accessing the cblk
- sp <IAudioTrack> audioTrack = mAudioTrack;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioTrack> audioTrack = mAudioTrack;
+ sp<IMemory> iMem = mCblkMemory;
audio_track_cblk_t* cblk = mCblk;
if (!mActive) {
@@ -743,8 +743,8 @@ status_t AudioTrack::createTrack_l(
status_t status;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
- ALOGE("Could not get audioflinger");
- return NO_INIT;
+ ALOGE("Could not get audioflinger");
+ return NO_INIT;
}
int afSampleRate;
@@ -830,7 +830,7 @@ status_t AudioTrack::createTrack_l(
mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
} else {
mCblk->buffers = sharedBuffer->pointer();
- // Force buffer full condition as data is already present in shared memory
+ // Force buffer full condition as data is already present in shared memory
mCblk->stepUser(mCblk->frameCount);
}
@@ -991,8 +991,8 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize)
// acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
// while we are accessing the cblk
mLock.lock();
- sp <IAudioTrack> audioTrack = mAudioTrack;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioTrack> audioTrack = mAudioTrack;
+ sp<IMemory> iMem = mCblkMemory;
mLock.unlock();
ssize_t written = 0;
@@ -1095,8 +1095,8 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
mLock.lock();
// acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
// while we are accessing the cblk
- sp <IAudioTrack> audioTrack = mAudioTrack;
- sp <IMemory> iMem = mCblkMemory;
+ sp<IAudioTrack> audioTrack = mAudioTrack;
+ sp<IMemory> iMem = mCblkMemory;
audio_track_cblk_t* cblk = mCblk;
bool active = mActive;
mLock.unlock();
@@ -1224,7 +1224,7 @@ status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart)
if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) {
ALOGW("dead IAudioTrack, creating a new one from %s TID %d",
- fromStart ? "start()" : "obtainBuffer()", gettid());
+ fromStart ? "start()" : "obtainBuffer()", gettid());
// signal old cblk condition so that other threads waiting for available buffers stop
// waiting now
@@ -1310,7 +1310,7 @@ status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart)
}
}
ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
- result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+ result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
if (result == NO_ERROR) {
// from now on we switch to the newly created cblk
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 47c261da..07b12e4 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -622,11 +622,11 @@ public:
sp<IEffect> effect;
if (pDesc == NULL) {
- return effect;
- if (status) {
- *status = BAD_VALUE;
- }
- }
+ return effect;
+ if (status) {
+ *status = BAD_VALUE;
+ }
+ }
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(pid);
@@ -679,7 +679,7 @@ IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
status_t BnAudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
pid_t pid = data.readInt32();
@@ -745,7 +745,7 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32( latency((audio_io_handle_t) data.readInt32()) );
return NO_ERROR;
} break;
- case SET_MASTER_VOLUME: {
+ case SET_MASTER_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
reply->writeInt32( setMasterVolume(data.readFloat()) );
return NO_ERROR;
@@ -815,14 +815,14 @@ status_t BnAudioFlinger::onTransact(
String8 keyValuePairs(data.readString8());
reply->writeInt32(setParameters(ioHandle, keyValuePairs));
return NO_ERROR;
- } break;
+ } break;
case GET_PARAMETERS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
String8 keys(data.readString8());
reply->writeString8(getParameters(ioHandle, keys));
return NO_ERROR;
- } break;
+ } break;
case REGISTER_CLIENT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 1db39a3..4178b29 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -68,7 +68,7 @@ IMPLEMENT_META_INTERFACE(AudioFlingerClient, "android.media.IAudioFlingerClient"
status_t BnAudioFlingerClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case IO_CONFIG_CHANGED: {
CHECK_INTERFACE(IAudioFlingerClient, data, reply);
int event = data.readInt32();
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index da7c124..5040bd9 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -365,7 +365,7 @@ IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService"
status_t BnAudioPolicyService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case SET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_devices_t device =
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 6b473c9..377b9a8 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -2,16 +2,16 @@
**
** Copyright 2007, The Android Open Source Project
**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
**
-** http://www.apache.org/licenses/LICENSE-2.0
+** http://www.apache.org/licenses/LICENSE-2.0
**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
** limitations under the License.
*/
@@ -41,7 +41,7 @@ public:
: BpInterface<IAudioRecord>(impl)
{
}
-
+
virtual status_t start(pid_t tid)
{
Parcel data, reply;
@@ -55,14 +55,14 @@ public:
}
return status;
}
-
+
virtual void stop()
{
Parcel data, reply;
data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
remote()->transact(STOP, data, &reply);
}
-
+
virtual sp<IMemory> getCblk() const
{
Parcel data, reply;
@@ -73,7 +73,7 @@ public:
cblk = interface_cast<IMemory>(reply.readStrongBinder());
}
return cblk;
- }
+ }
};
IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
@@ -83,8 +83,8 @@ IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
status_t BnAudioRecord::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
- case GET_CBLK: {
+ switch (code) {
+ case GET_CBLK: {
CHECK_INTERFACE(IAudioRecord, data, reply);
reply->writeStrongBinder(getCblk()->asBinder());
return NO_ERROR;
@@ -105,4 +105,3 @@ status_t BnAudioRecord::onTransact(
}
}; // namespace android
-
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index 28ebbbf..09f31a7 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -2,16 +2,16 @@
**
** Copyright 2007, The Android Open Source Project
**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
**
-** http://www.apache.org/licenses/LICENSE-2.0
+** http://www.apache.org/licenses/LICENSE-2.0
**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
** limitations under the License.
*/
@@ -48,7 +48,7 @@ public:
: BpInterface<IAudioTrack>(impl)
{
}
-
+
virtual sp<IMemory> getCblk() const
{
Parcel data, reply;
@@ -74,14 +74,14 @@ public:
}
return status;
}
-
+
virtual void stop()
{
Parcel data, reply;
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
remote()->transact(STOP, data, &reply);
}
-
+
virtual void flush()
{
Parcel data, reply;
@@ -96,14 +96,14 @@ public:
data.writeInt32(e);
remote()->transact(MUTE, data, &reply);
}
-
+
virtual void pause()
{
Parcel data, reply;
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
remote()->transact(PAUSE, data, &reply);
}
-
+
virtual status_t attachAuxEffect(int effectId)
{
Parcel data, reply;
@@ -172,8 +172,8 @@ IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
status_t BnAudioTrack::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
- case GET_CBLK: {
+ switch (code) {
+ case GET_CBLK: {
CHECK_INTERFACE(IAudioTrack, data, reply);
reply->writeStrongBinder(getCblk()->asBinder());
return NO_ERROR;
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index 5d40cc8..a303a8f 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -129,7 +129,7 @@ IMPLEMENT_META_INTERFACE(Effect, "android.media.IEffect");
status_t BnEffect::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case ENABLE: {
ALOGV("ENABLE");
CHECK_INTERFACE(IEffect, data, reply);
@@ -186,10 +186,10 @@ status_t BnEffect::onTransact(
} break;
case GET_CBLK: {
- CHECK_INTERFACE(IEffect, data, reply);
- reply->writeStrongBinder(getCblk()->asBinder());
- return NO_ERROR;
- } break;
+ CHECK_INTERFACE(IEffect, data, reply);
+ reply->writeStrongBinder(getCblk()->asBinder());
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
@@ -199,4 +199,3 @@ status_t BnEffect::onTransact(
// ----------------------------------------------------------------------------
}; // namespace android
-
diff --git a/media/libmedia/IEffectClient.cpp b/media/libmedia/IEffectClient.cpp
index 4693b45..aef4371 100644
--- a/media/libmedia/IEffectClient.cpp
+++ b/media/libmedia/IEffectClient.cpp
@@ -94,7 +94,7 @@ IMPLEMENT_META_INTERFACE(EffectClient, "android.media.IEffectClient");
status_t BnEffectClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case CONTROL_STATUS_CHANGED: {
ALOGV("CONTROL_STATUS_CHANGED");
CHECK_INTERFACE(IEffectClient, data, reply);
@@ -142,4 +142,3 @@ status_t BnEffectClient::onTransact(
// ----------------------------------------------------------------------------
}; // namespace android
-
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index aeb35a5..9199db6 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -43,10 +43,10 @@ IMediaDeathNotifier::getMediaPlayerService()
binder = sm->getService(String16("media.player"));
if (binder != 0) {
break;
- }
- ALOGW("Media player service not published, waiting...");
- usleep(500000); // 0.5 s
- } while(true);
+ }
+ ALOGW("Media player service not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while (true);
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index c47fa41..f586b02 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -319,7 +319,7 @@ IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
status_t BnMediaPlayer::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case DISCONNECT: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
disconnect();
diff --git a/media/libmedia/IMediaPlayerClient.cpp b/media/libmedia/IMediaPlayerClient.cpp
index 1f135c4..a670c96 100644
--- a/media/libmedia/IMediaPlayerClient.cpp
+++ b/media/libmedia/IMediaPlayerClient.cpp
@@ -56,7 +56,7 @@ IMPLEMENT_META_INTERFACE(MediaPlayerClient, "android.media.IMediaPlayerClient");
status_t BnMediaPlayerClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case NOTIFY: {
CHECK_INTERFACE(IMediaPlayerClient, data, reply);
int msg = data.readInt32();
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index f5b5cbd..f5fccef 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -132,7 +132,7 @@ IMPLEMENT_META_INTERFACE(MediaPlayerService, "android.media.IMediaPlayerService"
status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case CREATE: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
pid_t pid = data.readInt32();
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 2f4e31a..a710fd7 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -289,7 +289,7 @@ IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
status_t BnMediaRecorder::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case RELEASE: {
ALOGV("RELEASE");
CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/IMediaRecorderClient.cpp b/media/libmedia/IMediaRecorderClient.cpp
index ff235c9..e7907e3 100644
--- a/media/libmedia/IMediaRecorderClient.cpp
+++ b/media/libmedia/IMediaRecorderClient.cpp
@@ -53,7 +53,7 @@ IMPLEMENT_META_INTERFACE(MediaRecorderClient, "android.media.IMediaRecorderClien
status_t BnMediaRecorderClient::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
- switch(code) {
+ switch (code) {
case NOTIFY: {
CHECK_INTERFACE(IMediaRecorderClient, data, reply);
int msg = data.readInt32();
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index a85956c..312a493 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -74,14 +74,14 @@ int JetPlayer::init()
// init the EAS library
result = EAS_Init(&mEasData);
- if( result != EAS_SUCCESS) {
+ if (result != EAS_SUCCESS) {
ALOGE("JetPlayer::init(): Error initializing Sonivox EAS library, aborting.");
mState = EAS_STATE_ERROR;
return result;
}
// init the JET library with the default app event controller range
result = JET_Init(mEasData, NULL, sizeof(S_JET_CONFIG));
- if( result != EAS_SUCCESS) {
+ if (result != EAS_SUCCESS) {
ALOGE("JetPlayer::init(): Error initializing JET library, aborting.");
mState = EAS_STATE_ERROR;
return result;
@@ -151,7 +151,7 @@ int JetPlayer::release()
mAudioBuffer = NULL;
}
mEasData = NULL;
-
+
return EAS_SUCCESS;
}
@@ -166,7 +166,7 @@ int JetPlayer::render() {
ALOGV("JetPlayer::render(): entering");
// allocate render buffer
- mAudioBuffer =
+ mAudioBuffer =
new EAS_PCM[pLibConfig->mixBufferSize * pLibConfig->numChannels * MIX_NUM_BUFFERS];
// signal main thread that we started
@@ -177,8 +177,8 @@ int JetPlayer::render() {
mCondition.signal();
}
- while (1) {
-
+ while (1) {
+
mMutex.lock(); // [[[[[[[[ LOCK ---------------------------------------
if (mEasData == NULL) {
@@ -186,20 +186,20 @@ int JetPlayer::render() {
ALOGV("JetPlayer::render(): NULL EAS data, exiting render.");
goto threadExit;
}
-
+
// nothing to render, wait for client thread to wake us up
while (!mRender)
{
ALOGV("JetPlayer::render(): signal wait");
- if (audioStarted) {
- mAudioTrack->pause();
+ if (audioStarted) {
+ mAudioTrack->pause();
// we have to restart the playback once we start rendering again
audioStarted = false;
}
mCondition.wait(mMutex);
ALOGV("JetPlayer::render(): signal rx'd");
}
-
+
// render midi data into the input buffer
int num_output = 0;
EAS_PCM* p = mAudioBuffer;
@@ -210,8 +210,8 @@ int JetPlayer::render() {
}
p += count * pLibConfig->numChannels;
num_output += count * pLibConfig->numChannels * sizeof(EAS_PCM);
-
- // send events that were generated (if any) to the event callback
+
+ // send events that were generated (if any) to the event callback
fireEventsFromJetQueue();
}
@@ -265,9 +265,9 @@ threadExit:
// precondition: mMutex locked
void JetPlayer::fireUpdateOnStatusChange()
{
- if( (mJetStatus.currentUserID != mPreviousJetStatus.currentUserID)
+ if ( (mJetStatus.currentUserID != mPreviousJetStatus.currentUserID)
||(mJetStatus.segmentRepeatCount != mPreviousJetStatus.segmentRepeatCount) ) {
- if(mEventCallback) {
+ if (mEventCallback) {
mEventCallback(
JetPlayer::JET_USERID_UPDATE,
mJetStatus.currentUserID,
@@ -278,8 +278,8 @@ void JetPlayer::fireUpdateOnStatusChange()
mPreviousJetStatus.segmentRepeatCount = mJetStatus.segmentRepeatCount;
}
- if(mJetStatus.numQueuedSegments != mPreviousJetStatus.numQueuedSegments) {
- if(mEventCallback) {
+ if (mJetStatus.numQueuedSegments != mPreviousJetStatus.numQueuedSegments) {
+ if (mEventCallback) {
mEventCallback(
JetPlayer::JET_NUMQUEUEDSEGMENT_UPDATE,
mJetStatus.numQueuedSegments,
@@ -289,8 +289,8 @@ void JetPlayer::fireUpdateOnStatusChange()
mPreviousJetStatus.numQueuedSegments = mJetStatus.numQueuedSegments;
}
- if(mJetStatus.paused != mPreviousJetStatus.paused) {
- if(mEventCallback) {
+ if (mJetStatus.paused != mPreviousJetStatus.paused) {
+ if (mEventCallback) {
mEventCallback(JetPlayer::JET_PAUSE_UPDATE,
mJetStatus.paused,
-1,
@@ -307,7 +307,7 @@ void JetPlayer::fireUpdateOnStatusChange()
// precondition: mMutex locked
void JetPlayer::fireEventsFromJetQueue()
{
- if(!mEventCallback) {
+ if (!mEventCallback) {
// no callback, just empty the event queue
while (JET_GetEvent(mEasData, NULL, NULL)) { }
return;
@@ -341,7 +341,7 @@ int JetPlayer::loadFromFile(const char* path)
mEasJetFileLoc->offset = 0;
EAS_RESULT result = JET_OpenFile(mEasData, mEasJetFileLoc);
- if(result != EAS_SUCCESS)
+ if (result != EAS_SUCCESS)
mState = EAS_STATE_ERROR;
else
mState = EAS_STATE_OPEN;
@@ -353,7 +353,7 @@ int JetPlayer::loadFromFile(const char* path)
int JetPlayer::loadFromFD(const int fd, const long long offset, const long long length)
{
ALOGV("JetPlayer::loadFromFD(): fd=%d offset=%lld length=%lld", fd, offset, length);
-
+
Mutex::Autolock lock(mMutex);
mEasJetFileLoc = (EAS_FILE_LOCATOR) malloc(sizeof(EAS_FILE));
@@ -361,9 +361,9 @@ int JetPlayer::loadFromFD(const int fd, const long long offset, const long long
mEasJetFileLoc->offset = offset;
mEasJetFileLoc->length = length;
mEasJetFileLoc->path = NULL;
-
+
EAS_RESULT result = JET_OpenFile(mEasData, mEasJetFileLoc);
- if(result != EAS_SUCCESS)
+ if (result != EAS_SUCCESS)
mState = EAS_STATE_ERROR;
else
mState = EAS_STATE_OPEN;
@@ -392,7 +392,7 @@ int JetPlayer::play()
JET_Status(mEasData, &mJetStatus);
this->dumpJetStatus(&mJetStatus);
-
+
fireUpdateOnStatusChange();
// wake up render thread
@@ -468,7 +468,7 @@ void JetPlayer::dump()
void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
{
- if(pJetStatus!=NULL)
+ if (pJetStatus!=NULL)
ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d paused=%d",
pJetStatus->currentUserID, pJetStatus->segmentRepeatCount,
pJetStatus->numQueuedSegments, pJetStatus->paused);
@@ -478,4 +478,3 @@ void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
} // end namespace android
-
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 93ddca8..a7536b3 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -1099,12 +1099,12 @@ int MediaProfiles::getCamcorderProfileParamByName(const char *name,
camcorder_quality quality) const
{
ALOGV("getCamcorderProfileParamByName: %s for camera %d, quality %d",
- name, cameraId, quality);
+ name, cameraId, quality);
int index = getCamcorderProfileIndex(cameraId, quality);
if (index == -1) {
ALOGE("The given camcorder profile camera %d quality %d is not found",
- cameraId, quality);
+ cameraId, quality);
return -1;
}
diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp
index 73d4519..28b5aa7 100644
--- a/media/libmedia/MediaScanner.cpp
+++ b/media/libmedia/MediaScanner.cpp
@@ -54,7 +54,7 @@ const char *MediaScanner::locale() const {
void MediaScanner::loadSkipList() {
mSkipList = (char *)malloc(PROPERTY_VALUE_MAX * sizeof(char));
if (mSkipList) {
- property_get("testing.mediascanner.skiplist", mSkipList, "");
+ property_get("testing.mediascanner.skiplist", mSkipList, "");
}
if (!mSkipList || (strlen(mSkipList) == 0)) {
free(mSkipList);
@@ -135,8 +135,8 @@ MediaScanResult MediaScanner::doProcessDirectory(
struct dirent* entry;
if (shouldSkipDirectory(path)) {
- ALOGD("Skipping: %s", path);
- return MEDIA_SCAN_RESULT_OK;
+ ALOGD("Skipping: %s", path);
+ return MEDIA_SCAN_RESULT_OK;
}
// Treat all files as non-media in directories that contain a ".nomedia" file
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index cdfd477..e1e3348 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -228,4 +228,3 @@ void MediaScannerClient::endFile()
}
} // namespace android
-
diff --git a/media/libmedia/Metadata.cpp b/media/libmedia/Metadata.cpp
index 546a9b0..ef8a9ed 100644
--- a/media/libmedia/Metadata.cpp
+++ b/media/libmedia/Metadata.cpp
@@ -57,7 +57,7 @@ namespace media {
Metadata::Metadata(Parcel *p)
:mData(p),
- mBegin(p->dataPosition()) { }
+ mBegin(p->dataPosition()) { }
Metadata::~Metadata() { }
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 9c3170c..717d316 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -268,8 +268,8 @@ const ToneGenerator::ToneDescriptor ToneGenerator::sToneDescriptors[] = {
repeatCnt: 0,
repeatSegment: 0 }, // TONE_CDMA_CALL_SIGNAL_ISDN_SP_PRI
{ segments: { { duration: 0, waveFreq: { 0 }, 0, 0} },
- repeatCnt: 0,
- repeatSegment: 0 }, // TONE_CDMA_CALL_SIGNAL_ISDN_PAT3
+ repeatCnt: 0,
+ repeatSegment: 0 }, // TONE_CDMA_CALL_SIGNAL_ISDN_PAT3
{ segments: { { duration: 32, waveFreq: { 2091, 0 }, 0, 0 },
{ duration: 64, waveFreq: { 2556, 0 }, 4, 0 },
{ duration: 20, waveFreq: { 2091, 0 }, 0, 0 },
@@ -1015,7 +1015,7 @@ bool ToneGenerator::initAudioTrack() {
mpAudioTrack = NULL;
}
- // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
+ // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
mpAudioTrack = new AudioTrack();
ALOGV("Create Track: %p", mpAudioTrack);
@@ -1284,9 +1284,9 @@ audioCallback_EndLoop:
ALOGV("Cbk starting track");
lpToneGen->mState = TONE_PLAYING;
lSignal = true;
- break;
+ break;
case TONE_PLAYING:
- break;
+ break;
default:
// Force loop exit
lNumSmp = 0;
@@ -1578,4 +1578,3 @@ void ToneGenerator::WaveGenerator::getSamples(short *outBuffer,
}
} // end namespace android
-
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 70f8c0c..bcd6ae4 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -66,7 +66,7 @@ status_t Visualizer::setEnabled(bool enabled)
}
}
t->mLock.lock();
- }
+ }
status_t status = AudioEffect::setEnabled(enabled);
@@ -320,4 +320,3 @@ void Visualizer::CaptureThread::onFirstRef()
}
}; // namespace android
-
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 8d53357..b0241aa 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -45,7 +45,7 @@ const sp<IMediaPlayerService>& MediaMetadataRetriever::getService()
}
ALOGW("MediaPlayerService not published, waiting...");
usleep(500000); // 0.5 s
- } while(true);
+ } while (true);
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 4ff1862..7ea9490 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -195,8 +195,8 @@ status_t MediaPlayer::invoke(const Parcel& request, Parcel *reply)
(mCurrentState != MEDIA_PLAYER_STATE_ERROR) &&
((mCurrentState & MEDIA_PLAYER_IDLE) != MEDIA_PLAYER_IDLE);
if ((mPlayer != NULL) && hasBeenInitialized) {
- ALOGV("invoke %d", request.dataSize());
- return mPlayer->invoke(request, reply);
+ ALOGV("invoke %d", request.dataSize());
+ return mPlayer->invoke(request, reply);
}
ALOGE("invoke failed: wrong state %X", mCurrentState);
return INVALID_OPERATION;
@@ -556,9 +556,9 @@ status_t MediaPlayer::setAudioSessionId(int sessionId)
return BAD_VALUE;
}
if (sessionId != mAudioSessionId) {
- AudioSystem::releaseAudioSessionId(mAudioSessionId);
- AudioSystem::acquireAudioSessionId(sessionId);
- mAudioSessionId = sessionId;
+ AudioSystem::releaseAudioSessionId(mAudioSessionId);
+ AudioSystem::acquireAudioSessionId(sessionId);
+ mAudioSessionId = sessionId;
}
return NO_ERROR;
}
@@ -610,7 +610,7 @@ status_t MediaPlayer::getParameter(int key, Parcel *reply)
ALOGV("MediaPlayer::getParameter(%d)", key);
Mutex::Autolock _l(mLock);
if (mPlayer != NULL) {
- return mPlayer->getParameter(key, reply);
+ return mPlayer->getParameter(key, reply);
}
ALOGV("getParameter: no active player");
return INVALID_OPERATION;
@@ -658,7 +658,7 @@ void MediaPlayer::notify(int msg, int ext1, int ext2, const Parcel *obj)
// and seekTo within the same process.
// FIXME: Remember, this is a hack, it's not even a hack that is applied
// consistently for all use-cases, this needs to be revisited.
- if (mLockThreadId != getThreadId()) {
+ if (mLockThreadId != getThreadId()) {
mLock.lock();
locked = true;
}
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index cc73014..9541015 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -31,7 +31,7 @@ namespace android {
status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
{
ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -52,7 +52,7 @@ status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRec
status_t MediaRecorder::setPreviewSurface(const sp<Surface>& surface)
{
ALOGV("setPreviewSurface(%p)", surface.get());
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -77,7 +77,7 @@ status_t MediaRecorder::setPreviewSurface(const sp<Surface>& surface)
status_t MediaRecorder::init()
{
ALOGV("init");
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -107,7 +107,7 @@ status_t MediaRecorder::init()
status_t MediaRecorder::setVideoSource(int vs)
{
ALOGV("setVideoSource(%d)", vs);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -142,7 +142,7 @@ status_t MediaRecorder::setVideoSource(int vs)
status_t MediaRecorder::setAudioSource(int as)
{
ALOGV("setAudioSource(%d)", as);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -175,7 +175,7 @@ status_t MediaRecorder::setAudioSource(int as)
status_t MediaRecorder::setOutputFormat(int of)
{
ALOGV("setOutputFormat(%d)", of);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -201,7 +201,7 @@ status_t MediaRecorder::setOutputFormat(int of)
status_t MediaRecorder::setVideoEncoder(int ve)
{
ALOGV("setVideoEncoder(%d)", ve);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -231,7 +231,7 @@ status_t MediaRecorder::setVideoEncoder(int ve)
status_t MediaRecorder::setAudioEncoder(int ae)
{
ALOGV("setAudioEncoder(%d)", ae);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -261,7 +261,7 @@ status_t MediaRecorder::setAudioEncoder(int ae)
status_t MediaRecorder::setOutputFile(const char* path)
{
ALOGV("setOutputFile(%s)", path);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -287,7 +287,7 @@ status_t MediaRecorder::setOutputFile(const char* path)
status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
{
ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -324,7 +324,7 @@ status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
status_t MediaRecorder::setVideoSize(int width, int height)
{
ALOGV("setVideoSize(%d, %d)", width, height);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -367,7 +367,7 @@ sp<ISurfaceTexture> MediaRecorder::
status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
{
ALOGV("setVideoFrameRate(%d)", frames_per_second);
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -391,7 +391,7 @@ status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
status_t MediaRecorder::setParameters(const String8& params) {
ALOGV("setParameters(%s)", params.string());
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -419,7 +419,7 @@ status_t MediaRecorder::setParameters(const String8& params) {
status_t MediaRecorder::prepare()
{
ALOGV("prepare");
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -458,7 +458,7 @@ status_t MediaRecorder::prepare()
status_t MediaRecorder::getMaxAmplitude(int* max)
{
ALOGV("getMaxAmplitude");
- if(mMediaRecorder == NULL) {
+ if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
}
@@ -536,7 +536,7 @@ status_t MediaRecorder::reset()
doCleanUp();
status_t ret = UNKNOWN_ERROR;
- switch(mCurrentState) {
+ switch (mCurrentState) {
case MEDIA_RECORDER_IDLE:
ret = OK;
break;
@@ -547,7 +547,7 @@ status_t MediaRecorder::reset()
case MEDIA_RECORDER_ERROR: {
ret = doReset();
if (OK != ret) {
- return ret; // No need to continue
+ return ret; // No need to continue
}
} // Intentional fall through
case MEDIA_RECORDER_INITIALIZED:
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index fad7087..bfa4a49 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -190,13 +190,13 @@ void AudioFlinger::onFirstRef()
continue;
ALOGI("Loaded %s audio interface from %s (%s)", audio_interfaces[i],
- mod->name, mod->id);
+ mod->name, mod->id);
mAudioHwDevs.push(dev);
if (mPrimaryHardwareDev == NULL) {
mPrimaryHardwareDev = dev;
ALOGI("Using '%s' (%s.%s) as the primary audio interface",
- mod->name, mod->id, audio_interfaces[i]);
+ mod->name, mod->id, audio_interfaces[i]);
}
}
@@ -515,7 +515,7 @@ sp<IAudioTrack> AudioFlinger::createTrack(
}
Exit:
- if(status) {
+ if (status != NULL) {
*status = lStatus;
}
return trackHandle;
@@ -610,7 +610,7 @@ status_t AudioFlinger::setMasterVolume(float value)
mMasterVolume = value;
mMasterVolumeSW = swmv;
for (size_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
+ mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
return NO_ERROR;
}
@@ -642,7 +642,7 @@ status_t AudioFlinger::setMode(audio_mode_t mode)
Mutex::Autolock _l(mLock);
mMode = mode;
for (size_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setMode(mode);
+ mPlaybackThreads.valueAt(i)->setMode(mode);
}
return ret;
@@ -693,7 +693,7 @@ status_t AudioFlinger::setMasterMute(bool muted)
// This is an optimization, so PlaybackThread doesn't have to look at the one from AudioFlinger
mMasterMute = muted;
for (size_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setMasterMute(muted);
+ mPlaybackThreads.valueAt(i)->setMasterMute(muted);
return NO_ERROR;
}
@@ -761,7 +761,7 @@ status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
if (thread == NULL) {
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
+ mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
}
} else {
thread->setStreamVolume(stream, value);
@@ -786,7 +786,7 @@ status_t AudioFlinger::setStreamMute(audio_stream_type_t stream, bool muted)
AutoMutex lock(mLock);
mStreamTypes[stream].mute = muted;
for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
+ mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
return NO_ERROR;
}
@@ -1161,7 +1161,7 @@ void AudioFlinger::ThreadBase::sendConfigEvent_l(int event, int param)
void AudioFlinger::ThreadBase::processConfigEvents()
{
mLock.lock();
- while(!mConfigEvents.isEmpty()) {
+ while (!mConfigEvents.isEmpty()) {
ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
ConfigEvent configEvent = mConfigEvents[0];
mConfigEvents.removeAt(0);
@@ -1345,13 +1345,13 @@ void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectCha
mSuspendedSessions.editValueAt(index);
for (size_t i = 0; i < sessionEffects.size(); i++) {
- sp <SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
+ sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
for (int j = 0; j < desc->mRefCount; j++) {
if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
chain->setEffectSuspendedAll_l(true);
} else {
ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
- desc->mType.timeLow);
+ desc->mType.timeLow);
chain->setEffectSuspended_l(&desc->mType, true);
}
}
@@ -1386,7 +1386,7 @@ void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *ty
}
index = sessionEffects.indexOfKey(key);
- sp <SuspendedSessionDesc> desc;
+ sp<SuspendedSessionDesc> desc;
if (suspend) {
if (index >= 0) {
desc = sessionEffects.valueAt(index);
@@ -1659,14 +1659,14 @@ sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTra
// createTrack() was called by the client process.
if (!mStreamTypes[streamType].valid) {
ALOGW("createTrack_l() on thread %p: invalidating track on stream %d",
- this, streamType);
+ this, streamType);
android_atomic_or(CBLK_INVALID_ON, &track->mCblk->flags);
}
}
lStatus = NO_ERROR;
Exit:
- if(status) {
+ if (status) {
*status = lStatus;
}
return track;
@@ -2645,10 +2645,10 @@ bool AudioFlinger::MixerThread::checkForNewParameters_l()
status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
keyValuePair.string());
if (!mStandby && status == INVALID_OPERATION) {
- mOutput->stream->common.standby(&mOutput->stream->common);
- mStandby = true;
- mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ mOutput->stream->common.standby(&mOutput->stream->common);
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
@@ -2902,7 +2902,7 @@ void AudioFlinger::DirectOutputThread::threadLoop_mix()
size_t count = mFrameCount * mChannelCount;
uint8_t *src = (uint8_t *)mMixBuffer + count-1;
int16_t *dst = mMixBuffer + count-1;
- while(count--) {
+ while (count--) {
*dst-- = (int16_t)(*src--^0x80) << 8;
}
}
@@ -2955,7 +2955,7 @@ void AudioFlinger::DirectOutputThread::threadLoop_mix()
size_t count = mFrameCount * mChannelCount;
int16_t *src = mMixBuffer;
uint8_t *dst = (uint8_t *)mMixBuffer;
- while(count--) {
+ while (count--) {
*dst++ = (uint8_t)(((int32_t)*src++ + (1<<7)) >> 8)^0x80;
}
}
@@ -3014,10 +3014,10 @@ bool AudioFlinger::DirectOutputThread::checkForNewParameters_l()
status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
keyValuePair.string());
if (!mStandby && status == INVALID_OPERATION) {
- mOutput->stream->common.standby(&mOutput->stream->common);
- mStandby = true;
- mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+ mOutput->stream->common.standby(&mOutput->stream->common);
+ mStandby = true;
+ mBytesWritten = 0;
+ status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
keyValuePair.string());
}
if (status == NO_ERROR && reconfig) {
@@ -3208,7 +3208,7 @@ void AudioFlinger::DuplicatingThread::updateWaitTime_l()
bool AudioFlinger::DuplicatingThread::outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks)
{
for (size_t i = 0; i < outputTracks.size(); i++) {
- sp <ThreadBase> thread = outputTracks[i]->thread().promote();
+ sp<ThreadBase> thread = outputTracks[i]->thread().promote();
if (thread == 0) {
ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", outputTracks[i].get());
return false;
@@ -3264,14 +3264,14 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
// ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
- size_t size = sizeof(audio_track_cblk_t);
- uint8_t channelCount = popcount(channelMask);
- size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
- if (sharedBuffer == 0) {
- size += bufferSize;
- }
-
- if (client != NULL) {
+ size_t size = sizeof(audio_track_cblk_t);
+ uint8_t channelCount = popcount(channelMask);
+ size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
+ if (sharedBuffer == 0) {
+ size += bufferSize;
+ }
+
+ if (client != NULL) {
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory != 0) {
mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
@@ -3298,22 +3298,22 @@ AudioFlinger::ThreadBase::TrackBase::TrackBase(
client->heap()->dump("AudioTrack");
return;
}
- } else {
- mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
- // construct the shared structure in-place.
- new(mCblk) audio_track_cblk_t();
- // clear all buffers
- mCblk->frameCount = frameCount;
- mCblk->sampleRate = sampleRate;
- mChannelCount = channelCount;
- mChannelMask = channelMask;
- mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
- memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
- // Force underrun condition to avoid false underrun callback until first data is
- // written to buffer (other flags are cleared)
- mCblk->flags = CBLK_UNDERRUN_ON;
- mBufferEnd = (uint8_t *)mBuffer + bufferSize;
- }
+ } else {
+ mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
+ // construct the shared structure in-place.
+ new(mCblk) audio_track_cblk_t();
+ // clear all buffers
+ mCblk->frameCount = frameCount;
+ mCblk->sampleRate = sampleRate;
+ mChannelCount = channelCount;
+ mChannelMask = channelMask;
+ mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
+ memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
+ // Force underrun condition to avoid false underrun callback until first data is
+ // written to buffer (other flags are cleared)
+ mCblk->flags = CBLK_UNDERRUN_ON;
+ mBufferEnd = (uint8_t *)mBuffer + bufferSize;
+ }
}
AudioFlinger::ThreadBase::TrackBase::~TrackBase()
@@ -3491,22 +3491,22 @@ void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
+ AudioBufferProvider::Buffer* buffer, int64_t pts)
{
- audio_track_cblk_t* cblk = this->cblk();
- uint32_t framesReady;
- uint32_t framesReq = buffer->frameCount;
+ audio_track_cblk_t* cblk = this->cblk();
+ uint32_t framesReady;
+ uint32_t framesReq = buffer->frameCount;
- // Check if last stepServer failed, try to step now
- if (mStepServerFailed) {
- if (!step()) goto getNextBuffer_exit;
- ALOGV("stepServer recovered");
- mStepServerFailed = false;
- }
+ // Check if last stepServer failed, try to step now
+ if (mStepServerFailed) {
+ if (!step()) goto getNextBuffer_exit;
+ ALOGV("stepServer recovered");
+ mStepServerFailed = false;
+ }
- framesReady = cblk->framesReady();
+ framesReady = cblk->framesReady();
- if (CC_LIKELY(framesReady)) {
+ if (CC_LIKELY(framesReady)) {
uint32_t s = cblk->server;
uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;
@@ -3518,21 +3518,21 @@ status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
framesReq = bufferEnd - s;
}
- buffer->raw = getBuffer(s, framesReq);
- if (buffer->raw == NULL) goto getNextBuffer_exit;
+ buffer->raw = getBuffer(s, framesReq);
+ if (buffer->raw == NULL) goto getNextBuffer_exit;
- buffer->frameCount = framesReq;
+ buffer->frameCount = framesReq;
return NO_ERROR;
- }
+ }
getNextBuffer_exit:
- buffer->raw = NULL;
- buffer->frameCount = 0;
- ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
- return NOT_ENOUGH_DATA;
+ buffer->raw = NULL;
+ buffer->frameCount = 0;
+ ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
+ return NOT_ENOUGH_DATA;
}
-uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const{
+uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
return mCblk->framesReady();
}
@@ -3684,8 +3684,8 @@ status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
status_t status = DEAD_OBJECT;
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- status = playbackThread->attachAuxEffect(this, EffectId);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ status = playbackThread->attachAuxEffect(this, EffectId);
}
return status;
}
@@ -4104,14 +4104,14 @@ AudioFlinger::RecordThread::RecordTrack::RecordTrack(
mOverflow(false)
{
if (mCblk != NULL) {
- ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
- if (format == AUDIO_FORMAT_PCM_16_BIT) {
- mCblk->frameSize = mChannelCount * sizeof(int16_t);
- } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
- mCblk->frameSize = mChannelCount * sizeof(int8_t);
- } else {
- mCblk->frameSize = sizeof(int8_t);
- }
+ ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+ if (format == AUDIO_FORMAT_PCM_16_BIT) {
+ mCblk->frameSize = mChannelCount * sizeof(int16_t);
+ } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
+ mCblk->frameSize = mChannelCount * sizeof(int8_t);
+ } else {
+ mCblk->frameSize = sizeof(int8_t);
+ }
}
}
@@ -4130,7 +4130,7 @@ status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvi
uint32_t framesAvail;
uint32_t framesReq = buffer->frameCount;
- // Check if last stepServer failed, try to step now
+ // Check if last stepServer failed, try to step now
if (mStepServerFailed) {
if (!step()) goto getNextBuffer_exit;
ALOGV("stepServer recovered");
@@ -4976,7 +4976,7 @@ Exit:
status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, pid_t tid)
{
ALOGV("RecordThread::start tid=%d", tid);
- sp <ThreadBase> strongMe = this;
+ sp<ThreadBase> strongMe = this;
status_t status = NO_ERROR;
{
AutoMutex lock(mLock);
@@ -5029,7 +5029,7 @@ startError:
void AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
ALOGV("RecordThread::stop");
- sp <ThreadBase> strongMe = this;
+ sp<ThreadBase> strongMe = this;
{
AutoMutex lock(mLock);
if (mActiveTrack != 0 && recordTrack == mActiveTrack.get()) {
@@ -5196,8 +5196,9 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l()
if (status == NO_ERROR) {
status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
if (status == INVALID_OPERATION) {
- mInput->stream->common.standby(&mInput->stream->common);
- status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
+ mInput->stream->common.standby(&mInput->stream->common);
+ status = mInput->stream->common.set_parameters(&mInput->stream->common,
+ keyValuePair.string());
}
if (reconfig) {
if (status == BAD_VALUE &&
@@ -5285,8 +5286,8 @@ void AudioFlinger::RecordThread::readInputParameters()
if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
{
int channelCount;
- // optmization: if mono to mono, use the resampler in stereo to stereo mode to avoid
- // stereo to mono post process as the resampler always outputs stereo.
+ // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
+ // stereo to mono post process as the resampler always outputs stereo.
if (mChannelCount == 1 && mReqChannelCount == 2) {
channelCount = 1;
} else {
@@ -5460,7 +5461,7 @@ status_t AudioFlinger::closeOutput(audio_io_handle_t output)
{
// keep strong reference on the playback thread so that
// it is not destroyed while exit() is executed
- sp <PlaybackThread> thread;
+ sp<PlaybackThread> thread;
{
Mutex::Autolock _l(mLock);
thread = checkPlaybackThread_l(output);
@@ -5613,7 +5614,7 @@ status_t AudioFlinger::closeInput(audio_io_handle_t input)
{
// keep strong reference on the record thread so that
// it is not destroyed while exit() is executed
- sp <RecordThread> thread;
+ sp<RecordThread> thread;
{
Mutex::Autolock _l(mLock);
thread = checkRecordThread_l(input);
@@ -5746,7 +5747,7 @@ void AudioFlinger::purgeStaleEffects_l() {
AudioSessionRef *ref = mAudioSessionRefs.itemAt(k);
if (ref->mSessionid == sessionid) {
ALOGV(" session %d still exists for %d with %d refs",
- sessionid, ref->mPid, ref->mCnt);
+ sessionid, ref->mPid, ref->mCnt);
found = true;
break;
}
@@ -5979,7 +5980,7 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid,
// because of code checking output when entering the function.
// Note: io is never 0 when creating an effect on an input
if (io == 0) {
- // look for the thread where the specified audio session is present
+ // look for the thread where the specified audio session is present
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
if (mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
io = mPlaybackThreads.keyAt(i);
@@ -5987,12 +5988,12 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid,
}
}
if (io == 0) {
- for (size_t i = 0; i < mRecordThreads.size(); i++) {
- if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
- io = mRecordThreads.keyAt(i);
- break;
- }
- }
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+ io = mRecordThreads.keyAt(i);
+ break;
+ }
+ }
}
// If no output thread contains the requested session ID, default to
// first output. The effect chain will be moved to the correct output
@@ -6023,7 +6024,7 @@ sp<IEffect> AudioFlinger::createEffect(pid_t pid,
}
Exit:
- if(status) {
+ if (status != NULL) {
*status = lStatus;
}
return handle;
@@ -6226,7 +6227,7 @@ Exit:
handle.clear();
}
- if(status) {
+ if (status != NULL) {
*status = lStatus;
}
return handle;
@@ -6296,7 +6297,7 @@ void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
}
void AudioFlinger::ThreadBase::lockEffectChains_l(
- Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+ Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
effectChains = mEffectChains;
for (size_t i = 0; i < mEffectChains.size(); i++) {
@@ -6305,7 +6306,7 @@ void AudioFlinger::ThreadBase::lockEffectChains_l(
}
void AudioFlinger::ThreadBase::unlockEffectChains(
- const Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+ const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
for (size_t i = 0; i < effectChains.size(); i++) {
effectChains[i]->unlock();
@@ -6481,7 +6482,7 @@ status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
{
- for (size_t i = 0; i < mTracks.size(); ++i) {
+ for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (track->auxEffectId() == effectId) {
attachAuxEffect_l(track, 0);
@@ -7265,7 +7266,7 @@ AudioFlinger::EffectHandle::EffectHandle(const sp<EffectModule>& effect,
if (mCblk != NULL) {
new(mCblk) effect_param_cblk_t();
mBuffer = (uint8_t *)mCblk + bufOffset;
- }
+ }
} else {
ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t));
return;
@@ -8032,7 +8033,7 @@ void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModul
}
}
ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
- effect->desc().type.timeLow);
+ effect->desc().type.timeLow);
sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
// if effect is requested to suspended but was not yet enabled, supend it now.
if (desc->mEffect == 0) {
@@ -8045,7 +8046,7 @@ void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModul
return;
}
ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
- effect->desc().type.timeLow);
+ effect->desc().type.timeLow);
sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
desc->mEffect.clear();
effect->setSuspended(false);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index f3c8dd2..0e4b24a 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -465,9 +465,9 @@ private:
// ThreadBase mutex before processing the mixer and effects. This guarantees the
// integrity of the chains during the process.
// Also sets the parameter 'effectChains' to current value of mEffectChains.
- void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains);
+ void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
// unlock effect chains after process
- void unlockEffectChains(const Vector<sp<EffectChain> >& effectChains);
+ void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
// set audio mode to all effect chains
void setMode(audio_mode_t mode);
// get effect module with corresponding ID on specified audio session
@@ -1056,7 +1056,7 @@ private:
virtual uint32_t activeSleepTimeUs();
private:
- bool outputsReady(const SortedVector<sp<OutputTrack> > &outputTracks);
+ bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
protected:
// threadLoop snippets
virtual void threadLoop_mix();
@@ -1504,7 +1504,7 @@ mutable Mutex mLock; // mutex for process, commands and handl
uint32_t strategy() const { return mStrategy; }
void setStrategy(uint32_t strategy)
- { mStrategy = strategy; }
+ { mStrategy = strategy; }
// suspend effect of the given type
void setEffectSuspended_l(const effect_uuid_t *type,
@@ -1544,7 +1544,7 @@ mutable Mutex mLock; // mutex for process, commands and handl
wp<ThreadBase> mThread; // parent mixer thread
Mutex mLock; // mutex protecting effect list
- Vector<sp<EffectModule> > mEffects; // list of effect modules
+ Vector< sp<EffectModule> > mEffects; // list of effect modules
int mSessionId; // audio session ID
int16_t *mInBuffer; // chain input buffer
int16_t *mOutBuffer; // chain output buffer
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index d57326b..c23eb04 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -649,7 +649,7 @@ bool AudioPolicyService::AudioCommandThread::threadLoop()
mLock.lock();
while (!exitPending())
{
- while(!mAudioCommands.isEmpty()) {
+ while (!mAudioCommands.isEmpty()) {
nsecs_t curTime = systemTime();
// commands are sorted by increasing time stamp: execute them from index 0 and up
if (mAudioCommands[0]->mTime <= curTime) {
@@ -693,16 +693,16 @@ bool AudioPolicyService::AudioCommandThread::threadLoop()
delete data;
}break;
case SET_PARAMETERS: {
- ParametersData *data = (ParametersData *)command->mParam;
- ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
- data->mKeyValuePairs.string(), data->mIO);
- command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
- if (command->mWaitStatus) {
- command->mCond.signal();
- mWaitWorkCV.wait(mLock);
- }
- delete data;
- }break;
+ ParametersData *data = (ParametersData *)command->mParam;
+ ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
+ data->mKeyValuePairs.string(), data->mIO);
+ command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
+ if (command->mWaitStatus) {
+ command->mCond.signal();
+ mWaitWorkCV.wait(mLock);
+ }
+ delete data;
+ }break;
case SET_VOICE_VOLUME: {
VoiceVolumeData *data = (VoiceVolumeData *)command->mParam;
ALOGV("AudioCommandThread() processing set voice volume volume %f",
@@ -916,19 +916,19 @@ void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *comma
AudioParameter param = AudioParameter(data->mKeyValuePairs);
AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
for (size_t j = 0; j < param.size(); j++) {
- String8 key;
- String8 value;
- param.getAt(j, key, value);
- for (size_t k = 0; k < param2.size(); k++) {
- String8 key2;
- String8 value2;
- param2.getAt(k, key2, value2);
- if (key2 == key) {
- param2.remove(key2);
- ALOGV("Filtering out parameter %s", key2.string());
- break;
- }
- }
+ String8 key;
+ String8 value;
+ param.getAt(j, key, value);
+ for (size_t k = 0; k < param2.size(); k++) {
+ String8 key2;
+ String8 value2;
+ param2.getAt(k, key2, value2);
+ if (key2 == key) {
+ param2.remove(key2);
+ ALOGV("Filtering out parameter %s", key2.string());
+ break;
+ }
+ }
}
// if all keys have been filtered out, remove the command.
// otherwise, update the key value pairs
@@ -1020,7 +1020,7 @@ int AudioPolicyService::startTone(audio_policy_tone_t tone,
ALOGE("startTone: illegal tone requested (%d)", tone);
if (stream != AUDIO_STREAM_VOICE_CALL)
ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
- tone);
+ tone);
mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
AUDIO_STREAM_VOICE_CALL);
return 0;
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 7119b90..9ed905d 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -311,8 +311,8 @@ private:
mutable Mutex mLock; // prevents concurrent access to AudioPolicy manager functions changing
// device connection state or routing
- sp <AudioCommandThread> mAudioCommandThread; // audio commands thread
- sp <AudioCommandThread> mTonePlaybackThread; // tone playback thread
+ sp<AudioCommandThread> mAudioCommandThread; // audio commands thread
+ sp<AudioCommandThread> mTonePlaybackThread; // tone playback thread
struct audio_policy_device *mpAudioPolicyDev;
struct audio_policy *mpAudioPolicy;
KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 398ba0b..fbb54cf 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -227,7 +227,7 @@ void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
provider->releaseBuffer(&mBuffer);
- // mBuffer.frameCount == 0 now so we reload a new buffer
+ // mBuffer.frameCount == 0 now so we reload a new buffer
}
int16_t *in = mBuffer.i16;
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 9deb796..1610e00 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -33,7 +33,7 @@ public:
// HIGH_QUALITY: fixed multi-tap FIR (e.g. 48KHz->44.1KHz)
// NOTE: high quality SRC will only be supported for
// certain fixed rate conversions. Sample rate cannot be
- // changed dynamically.
+ // changed dynamically.
enum src_quality {
DEFAULT=0,
LOW_QUALITY=1,
diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h
index b72b62a..892785a 100644
--- a/services/audioflinger/AudioResamplerCubic.h
+++ b/services/audioflinger/AudioResamplerCubic.h
@@ -55,7 +55,7 @@ private:
p->y1 = p->y2;
p->y2 = p->y3;
p->y3 = in;
- p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
+ p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
p->b = (p->y2 << 1) + p->y0 - (((5 * p->y1 + p->y3)) >> 1);
p->c = (p->y2 - p->y0) >> 1;
}
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index d373c08..76662d8 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -222,7 +222,7 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
} else {
read<CHANNELS>(impulse, phaseFraction, mBuffer.i16, inputIndex);
}
- }
+ }
}
int16_t *in = mBuffer.i16;
const size_t frameCount = mBuffer.frameCount;
@@ -247,7 +247,7 @@ void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
if (inputIndex >= frameCount)
break; // need a new buffer
read<CHANNELS>(impulse, phaseFraction, in, inputIndex);
- } else if(phaseIndex == 2) { // maximum value
+ } else if (phaseIndex == 2) { // maximum value
inputIndex++;
if (inputIndex >= frameCount)
break; // 0 frame available, 2 frames needed