AAOS Series (7) --- AudioRecord Recording Logic Analysis (Part 1)
One article to explain the AAOS architecture thoroughly, holding nothing back.
📌 This post analyzes the initialization of AudioRecord.
1. Scenario:
In AAOS Framework development, the recording module is an important component of almost every project. Whether it is voice control, in-car intercom (companion mode), or the integration of a speech recognition engine such as iFLYTEK, all of them depend heavily on the system Framework layer providing stable, usable recording data to the App layer.
When implementing recording, a few key parameters must be set correctly: the sample rate, the channel count, and the audio format. The sample rate is especially critical. If it is set incorrectly, the recorded audio data may fail to decode and play back properly, causing the speech engine to fail at recognition and seriously degrading the user experience.
These parameter configurations therefore need to be pinned down and unified during platform development to guarantee the stability and compatibility of the entire voice pipeline.
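Before locking the parameters in, it helps to probe what the device actually supports. Below is a minimal sketch (the class and method names are illustrative, not from any real project): AudioRecord.getMinBufferSize() returns ERROR_BAD_VALUE for unsupported combinations, so it doubles as a cheap capability check.

```java
import android.media.AudioFormat;
import android.media.AudioRecord;

public class RecordConfigProbe {
    /** Returns true if the platform reports a usable minimum buffer for this combination. */
    public static boolean isConfigSupported(int sampleRate, int channelConfig, int encoding) {
        int minBuf = AudioRecord.getMinBufferSize(sampleRate, channelConfig, encoding);
        return minBuf != AudioRecord.ERROR_BAD_VALUE && minBuf != AudioRecord.ERROR;
    }

    public static void probe() {
        // 16 kHz / stereo / 16-bit PCM is the configuration used throughout this article.
        boolean ok = isConfigSupported(16000,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        android.util.Log.d("RecordConfigProbe", "16k/stereo/16bit supported: " + ok);
    }
}
```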
The basic app-side recording code is as follows:
```java
// Audio source
private int audioSource = MediaRecorder.AudioSource.MIC;
// Sample rate. 44100 Hz is the current standard; some devices also support 22050, 16000, 11025.
private static int sampleRateInHz = 16000;
// Recording channel configuration: CHANNEL_IN_STEREO is stereo, CHANNEL_CONFIGURATION_MONO is mono.
private static int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
// Audio data format: PCM 16 bits per sample (guaranteed to be supported on all devices).
// PCM 8 bits per sample is not necessarily supported.
private static int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
private int bufferSizeInBytes = 0;
private AudioRecord audioRecord;

private void creatAudioRecord() {
    // true: recording path used by built-in apps; false: recording path used by third-party apps.
    boolean isBuild = true;
    if (isBuild) {
        // MIC1 + MIC2 + REF1 + REF2
        bufferSizeInBytes = 5120;
        final AudioFormat audioFormat1 = new AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(sampleRateInHz)
                .setChannelIndexMask(0xf) // with this set, 4-channel data is captured
                .build();
        audioRecord = new AudioRecord.Builder()
                .setAudioFormat(audioFormat1)
                .build();
    } else {
        //channelConfig = AudioFormat.CHANNEL_IN_STEREO | AudioFormat.CHANNEL_IN_FRONT_BACK;
        // Get the buffer size in bytes.
        bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
        Log.d(TAG, "creatAudioRecord: bufferSizeInBytes = " + bufferSizeInBytes);
        audioRecord = new AudioRecord(audioSource, sampleRateInHz,
                channelConfig, audioFormat, bufferSizeInBytes);
    }
}
```
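For completeness, here is what a capture loop built on top of this initialization typically looks like (a sketch; readPcmLoop and isRecording are illustrative names, not part of the code above):

```java
private volatile boolean isRecording = true; // illustrative stop flag

private void readPcmLoop() {
    byte[] buffer = new byte[bufferSizeInBytes];
    audioRecord.startRecording();
    try {
        while (isRecording) {
            // Blocks until up to buffer.length bytes of PCM have been captured.
            int read = audioRecord.read(buffer, 0, buffer.length);
            if (read > 0) {
                // Hand `read` bytes of PCM to the consumer (file, ASR engine, ...).
            }
        }
    } finally {
        audioRecord.stop();
        audioRecord.release();
    }
}
```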
This lands in the constructor of AudioRecord.java. Regular apps such as HiCar and WeChat all record through the standard API:
```java
//channelConfig = AudioFormat.CHANNEL_IN_STEREO | AudioFormat.CHANNEL_IN_FRONT_BACK;
// Get the buffer size in bytes.
bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
Log.d(TAG, "creatAudioRecord: bufferSizeInBytes = " + bufferSizeInBytes);
// Invoke the AudioRecord constructor.
audioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes);
```
Next, let's walk through the relevant methods of the AudioRecord.java class:
1. The AudioRecord constructor:
```java
public class AudioRecord implements AudioRouting, MicrophoneDirection,
        AudioRecordingMonitor, AudioRecordingMonitorClient {

    // The standard app-facing constructor.
    public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig,
            int audioFormat, int bufferSizeInBytes) throws IllegalArgumentException {
        this((new AudioAttributes.Builder())
                        .setInternalCapturePreset(audioSource)
                        .build(),
                (new AudioFormat.Builder())
                        // 2. Here the channelConfig is converted into a channel mask.
                        .setChannelMask(getChannelMaskFromLegacyConfig(channelConfig,
                                true/*allow legacy configurations*/))
                        .setEncoding(audioFormat)
                        .setSampleRate(sampleRateInHz)
                        .build(),
                bufferSizeInBytes,
                AudioManager.AUDIO_SESSION_ID_GENERATE);
    }
}
```
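Note that setInternalCapturePreset() is a hidden API. For a regular app, the same construction is expressed through the public Builder, roughly like this (a sketch of the equivalent call, not framework code):

```java
// Equivalent Builder-based construction for a third-party app (sketch).
AudioRecord recorder = new AudioRecord.Builder()
        .setAudioSource(MediaRecorder.AudioSource.MIC)     // public face of the capture preset
        .setAudioFormat(new AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(16000)
                .setChannelMask(AudioFormat.CHANNEL_IN_STEREO)
                .build())
        .setBufferSizeInBytes(bufferSizeInBytes)
        .build();
```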
2. getChannelMaskFromLegacyConfig() converts the channelConfig into a channel mask:
```java
// We call this method with inChannelConfig = CHANNEL_IN_STEREO = 12.
private static int getChannelMaskFromLegacyConfig(int inChannelConfig,
        boolean allowLegacyConfig) {
    int mask;
    switch (inChannelConfig) {
        case AudioFormat.CHANNEL_IN_DEFAULT: // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_IN_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mask = AudioFormat.CHANNEL_IN_MONO;
            break;
        // For a stereo config, mask is set to CHANNEL_IN_STEREO, i.e. unchanged.
        case AudioFormat.CHANNEL_IN_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mask = AudioFormat.CHANNEL_IN_STEREO;
            break;
        case (AudioFormat.CHANNEL_IN_FRONT | AudioFormat.CHANNEL_IN_BACK):
            mask = inChannelConfig;
            break;
        default:
            throw new IllegalArgumentException("Unsupported channel configuration.");
    }
    if (!allowLegacyConfig && ((inChannelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO)
            || (inChannelConfig == AudioFormat.CHANNEL_CONFIGURATION_STEREO))) {
        // only happens with the constructor that uses AudioAttributes and AudioFormat
        throw new IllegalArgumentException("Unsupported deprecated configuration.");
    }
    // Return the mask; here it is CHANNEL_IN_STEREO = 12.
    return mask;
}
```
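A quick sanity check of the numbers involved (a throwaway sketch): CHANNEL_IN_STEREO is CHANNEL_IN_LEFT (0x4) | CHANNEL_IN_RIGHT (0x8), i.e. 12, and counting its set bits yields the channel count.

```java
int mask = AudioFormat.CHANNEL_IN_STEREO;   // 0x4 | 0x8 = 0xC = 12
int channels = Integer.bitCount(mask);      // 2 channels
Log.d(TAG, "mask=" + mask + " channels=" + channels); // mask=12 channels=2
```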
3. setChannelMask() stores the computed mask on the AudioFormat.Builder:
There is no complicated logic here; the channelMask is simply saved into the member variable mChannelMask.
```java
public @NonNull Builder setChannelMask(int channelMask) {
    if (channelMask == CHANNEL_INVALID) {
        throw new IllegalArgumentException("Invalid zero channel mask");
    } else if (/* channelMask != 0 && */ mChannelIndexMask != 0 &&
            Integer.bitCount(channelMask) != Integer.bitCount(mChannelIndexMask)) {
        throw new IllegalArgumentException("Mismatched channel count for mask " +
                Integer.toHexString(channelMask).toUpperCase());
    }
    // Save the incoming channelMask into mChannelMask.
    mChannelMask = channelMask;
    // Record the property in mPropertySetMask; this is used again later.
    mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK;
    return this;
}
```
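That bit-count cross-check matters when both masks come into play. A sketch of the failure mode (illustrative, mirroring the check above): setting a 4-channel index mask and then a 2-channel position mask throws.

```java
AudioFormat.Builder b = new AudioFormat.Builder()
        .setChannelIndexMask(0xf);                   // 4 channels by index
try {
    b.setChannelMask(AudioFormat.CHANNEL_IN_STEREO); // 2 channels by position
} catch (IllegalArgumentException e) {
    // Thrown: bitCount(0xC) == 2 != bitCount(0xF) == 4.
}
```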
4. build() packages the parameters into an AudioFormat:
```java
public AudioFormat build() {
    AudioFormat af = new AudioFormat(
            mPropertySetMask,
            mEncoding,
            mSampleRate,
            mChannelMask,
            mChannelIndexMask);
    return af;
}
```
5. With the AudioFormat built, the AudioRecord is constructed from it:
```java
// The AudioFormat here is the one produced by build(), carrying the recording parameters.
public AudioRecord(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int sessionId) throws IllegalArgumentException {
    mRecordingState = RECORDSTATE_STOPPED;
    if (attributes == null) {
        throw new IllegalArgumentException("Illegal null AudioAttributes");
    }
    if (format == null) {
        throw new IllegalArgumentException("Illegal null AudioFormat");
    }
    // remember which looper is associated with the AudioRecord instanciation
    if ((mInitializationLooper = Looper.myLooper()) == null) {
        mInitializationLooper = Looper.getMainLooper();
    }
    // is this AudioRecord using REMOTE_SUBMIX at full volume?
    if (attributes.getCapturePreset() == MediaRecorder.AudioSource.REMOTE_SUBMIX) {
        final AudioAttributes.Builder filteredAttr = new AudioAttributes.Builder();
        final Iterator<String> tagsIter = attributes.getTags().iterator();
        while (tagsIter.hasNext()) {
            final String tag = tagsIter.next();
            if (tag.equalsIgnoreCase(SUBMIX_FIXED_VOLUME)) {
                mIsSubmixFullVolume = true;
                Log.v(TAG, "Will record from REMOTE_SUBMIX at full fixed volume");
            } else { // SUBMIX_FIXED_VOLUME: is not to be propagated to the native layers
                filteredAttr.addTag(tag);
            }
        }
        filteredAttr.setInternalCapturePreset(attributes.getCapturePreset());
        mAudioAttributes = filteredAttr.build();
    } else {
        mAudioAttributes = attributes;
    }
    // Read the sample rate from the format; in our case this is 16000.
    int rate = format.getSampleRate();
    if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
        rate = 0;
    }
    int encoding = AudioFormat.ENCODING_DEFAULT;
    if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
        encoding = format.getEncoding();
    }
    audioParamCheck(attributes.getCapturePreset(), rate, encoding);
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
        mChannelIndexMask = format.getChannelIndexMask();
        mChannelCount = format.getChannelCount();
    }
    // Remember AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK? We set it in setChannelMask() above.
    if ((format.getPropertySetMask()
            & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
        // Compute mChannelMask.
        mChannelMask = getChannelMaskFromLegacyConfig(format.getChannelMask(), false);
        mChannelCount = format.getChannelCount();
    } else if (mChannelIndexMask == 0) {
        mChannelMask = getChannelMaskFromLegacyConfig(AudioFormat.CHANNEL_IN_DEFAULT, false);
        mChannelCount = AudioFormat.channelCountFromInChannelMask(mChannelMask);
    }
    audioBuffSizeCheck(bufferSizeInBytes);
    int[] sampleRate = new int[] {mSampleRate};
    int[] session = new int[1];
    session[0] = sessionId;
    //TODO: update native initialization when information about hardware init failure
    //      due to capture device already open is available.
    // Key step: call the JNI method native_setup, passing in
    //   sampleRate        (16000),
    //   mChannelMask      (stereo: CHANNEL_IN_STEREO = 12),
    //   mChannelIndexMask (unused here, 0),
    //   mAudioFormat      (ENCODING_PCM_16BIT = 2).
    int initResult = native_setup(new WeakReference<AudioRecord>(this),
            mAudioAttributes, sampleRate, mChannelMask, mChannelIndexMask,
            mAudioFormat, mNativeBufferSizeInBytes,
            session, getCurrentOpPackageName(), 0 /*nativeRecordInJavaObj*/);
    if (initResult != SUCCESS) {
        loge("Error code " + initResult + " when initializing native AudioRecord object.");
        return; // with mState == STATE_UNINITIALIZED
    }
    mSampleRate = sampleRate[0];
    mSessionId = session[0];
    mState = STATE_INITIALIZED;
}
```
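To keep the hand-off concrete, these are the argument values crossing into native code on the stereo path traced here (a sketch that just prints the constants; not framework code):

```java
int[] sampleRate     = {16000};                        // requested sample rate
int channelMask      = AudioFormat.CHANNEL_IN_STEREO;  // 12 (0xC)
int channelIndexMask = 0;                              // unused on this path
int audioFormat      = AudioFormat.ENCODING_PCM_16BIT; // 2
Log.d(TAG, "native_setup args: rate=" + sampleRate[0] + " mask=" + channelMask
        + " indexMask=" + channelIndexMask + " format=" + audioFormat);
```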
A side note on the difference between the two mask fields in the AudioRecord class:
```java
/**
 * The audio channel position mask.
 * Marks the position of each channel, e.g. front-left, front-right, center.
 */
private int mChannelMask;

/**
 * The audio channel index mask.
 * Marks channels by their index.
 */
private int mChannelIndexMask;
```
mChannelMask vs. mChannelIndexMask in AudioRecord

I. mChannelMask: Channel Position Mask

Full name: Channel Position Mask (a mask over channel positions)

Purpose: describes the spatial position of each audio channel (e.g. left, right, front, back)

Common values:

- AudioFormat.CHANNEL_IN_MONO (mono = FRONT)
- AudioFormat.CHANNEL_IN_STEREO (stereo = LEFT + RIGHT)
- AudioFormat.CHANNEL_IN_LEFT
- AudioFormat.CHANNEL_IN_RIGHT
Typical scenarios:

- Ordinary microphone recording
- Conventional audio input configurations

✅ Carries human-readable semantics; convenient to use, suitable for most common purposes
II. mChannelIndexMask: Channel Index Mask

Full name: Channel Index Mask (a mask over channel indices)

Purpose: channels are addressed by plain index (e.g. channel 0, channel 1)

Common values (combined as needed):

- AudioFormat.CHANNEL_INDEX_MASK_0
- AudioFormat.CHANNEL_INDEX_MASK_1
Typical scenarios:

- Multi-microphone arrays (e.g. in-car arrays, smart speakers)
- Advanced audio processing (e.g. beamforming, sound-source localization)
- Custom channel ordering, or channels without specific spatial semantics

More flexible and extensible, but carries no semantics; the meaning of each channel must be defined by the application (see the sketch below).
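The contrast in code (a sketch; the 0xf index mask matches the MIC1+MIC2+REF1+REF2 layout from the example at the top of the article):

```java
// Position mask: "left + right", carries spatial meaning.
AudioFormat stereoByPosition = new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(16000)
        .setChannelMask(AudioFormat.CHANNEL_IN_STEREO)
        .build();

// Index mask: "channels 0..3", no spatial meaning; what each slot carries
// (e.g. MIC1/MIC2/REF1/REF2 on a car mic array) is defined by the platform.
AudioFormat quadByIndex = new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(16000)
        .setChannelIndexMask(0xf) // bits 0-3 set -> 4 channels
        .build();
```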
The differences side by side:
| Property | mChannelMask | mChannelIndexMask |
|---|---|---|
| Representation | Channel spatial position (e.g. LEFT, RIGHT) | Channel array index (e.g. index 0, 1) |
| Semantics | Explicit (spatial meaning) | None (defined by the application) |
| Ease of use | High | More involved |
| Use cases | Ordinary audio capture | Multi-channel processing (e.g. mic arrays) |
| Can they coexist? | No; typically only one is used | No; channelIndexMask takes priority |
6. The JNI method native_setup()
The method is registered in the following file:
@android11.0/frameworks/base/core/jni/android_media_AudioRecord.cpp
{"native_start", "(II)I", (void *)android_media_AudioRecord_start},{"native_stop", "()V", (void *)android_media_AudioRecord_stop},{"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",(void *)android_media_AudioRecord_setup},
The JNI function bound to native_setup is android_media_AudioRecord_setup.
7. android_media_AudioRecord_setup():
```cpp
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa, jintArray jSampleRate, jint channelMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName,
        jlong nativeRecordInJavaObj)
{
    //ALOGV(">> Entering android_media_AudioRecord_setup");
    //ALOGV("sampleRate=%d, audioFormat=%d, channel mask=%x, buffSizeInBytes=%d "
    //     "nativeRecordInJavaObj=0x%llX",
    //     sampleRateInHertz, audioFormat, channelMask, buffSizeInBytes, nativeRecordInJavaObj);

    // 7.1 The channelMask is converted here; for our input the same value comes back unchanged.
    audio_channel_mask_t localChanMask = inChannelMaskToNative(channelMask);

    if (jSession == NULL) {
        ALOGE("Error creating AudioRecord: invalid session ID pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        return (jint) AUDIO_JAVA_ERROR;
    }
    audio_session_t sessionId = (audio_session_t) nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    sp<AudioRecord> lpRecorder = 0;
    audiorecord_callback_cookie *lpCallbackData = NULL;

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
    }

    // if we pass in an existing *Native* AudioRecord, we don't need to create/initialize one.
    if (nativeRecordInJavaObj == 0) {
        if (jaa == 0) {
            ALOGE("Error creating AudioRecord: invalid audio attributes");
            return (jint) AUDIO_JAVA_ERROR;
        }
        if (jSampleRate == 0) {
            ALOGE("Error creating AudioRecord: invalid sample rates");
            return (jint) AUDIO_JAVA_ERROR;
        }
        jint elements[1];
        env->GetIntArrayRegion(jSampleRate, 0, 1, elements);
        int sampleRateInHertz = elements[0];

        // channel index mask takes priority over channel position masks.
        // If a channelIndexMask was set, it overrides the position mask (channelMask).
        if (channelIndexMask) {
            // Java channel index masks need the representation bits set.
            localChanMask = audio_channel_mask_from_representation_and_bits(
                    AUDIO_CHANNEL_REPRESENTATION_INDEX,
                    channelIndexMask);
        }
        // Java channel position masks map directly to the native definition.
        // Check that the computed localChanMask is a valid input channel mask.
        if (!audio_is_input_channel(localChanMask)) {
            ALOGE("Error creating AudioRecord: channel mask %#x is not valid.", localChanMask);
            return (jint) AUDIORECORD_ERROR_SETUP_INVALIDCHANNELMASK;
        }
        // Count the 1-bits in localChanMask to get channelCount. E.g. CHANNEL_IN_STEREO = 12
        // is 4 + 8, i.e. binary 1100, which contains 2 channels.
        uint32_t channelCount = audio_channel_count_from_in_mask(localChanMask);

        // compare the format against the Java constants
        // 7.2 The audioFormat passed from the Java layer goes through a conversion here.
        audio_format_t format = audioFormatToNative(audioFormat);
        if (format == AUDIO_FORMAT_INVALID) {
            ALOGE("Error creating AudioRecord: unsupported audio format %d.", audioFormat);
            return (jint) AUDIORECORD_ERROR_SETUP_INVALIDFORMAT;
        }

        // Bytes per sample: for ENCODING_PCM_16BIT = 2, this returns 2 bytes (16 bits per sample).
        size_t bytesPerSample = audio_bytes_per_sample(format);

        if (buffSizeInBytes == 0) {
            ALOGE("Error creating AudioRecord: frameCount is 0.");
            return (jint) AUDIORECORD_ERROR_SETUP_ZEROFRAMECOUNT;
        }
        size_t frameSize = channelCount * bytesPerSample;
        size_t frameCount = buffSizeInBytes / frameSize;

        ScopedUtfChars opPackageNameStr(env, opPackageName);

        // create an uninitialized AudioRecord object
        lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));

        // read the AudioAttributes values
        auto paa = JNIAudioAttributeHelper::makeUnique();
        jint jStatus = JNIAudioAttributeHelper::nativeFromJava(env, jaa, paa.get());
        if (jStatus != (jint)AUDIO_JAVA_SUCCESS) {
            return jStatus;
        }
        ALOGV("AudioRecord_setup for source=%d tags=%s flags=%08x",
                paa->source, paa->tags, paa->flags);

        audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
        if (paa->flags & AUDIO_FLAG_HW_HOTWORD) {
            flags = AUDIO_INPUT_FLAG_HW_HOTWORD;
        }
        // create the callback information:
        // this data will be passed with every AudioRecord callback
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
        lpCallbackData->busy = false;

        // Call AudioRecord::set() to apply the parameters.
        const status_t status = lpRecorder->set(paa->source,
                sampleRateInHertz,
                format,            // word length, PCM
                localChanMask,
                frameCount,
                recorderCallback,  // callback_t
                lpCallbackData,    // void* user
                0,                 // notificationFrames
                true,              // threadCanCallJava
                sessionId,
                AudioRecord::TRANSFER_DEFAULT,
                flags,
                -1, -1,            // default uid, pid
                paa.get());

        if (status != NO_ERROR) {
            ALOGE("Error creating AudioRecord instance: initialization check failed with status %d.",
                    status);
            goto native_init_failure;
        }
        // Set caller name so it can be logged in destructor.
        // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_JAVA
        lpRecorder->setCallerName("java");
    } else { // end if (nativeRecordInJavaObj == 0)
        lpRecorder = (AudioRecord*)nativeRecordInJavaObj;
        // TODO: We need to find out which members of the Java AudioRecord might need to be
        // initialized from the Native AudioRecord
        // these are directly returned from getters:
        //  mSampleRate
        //  mRecordSource
        //  mAudioFormat
        //  mChannelMask
        //  mChannelCount
        //  mState (?)
        //  mRecordingState (?)
        //  mPreferredDevice

        // create the callback information:
        // this data will be passed with every AudioRecord callback
        lpCallbackData = new audiorecord_callback_cookie;
        lpCallbackData->audioRecord_class = (jclass)env->NewGlobalRef(clazz);
        // we use a weak reference so the AudioRecord object can be garbage collected.
        lpCallbackData->audioRecord_ref = env->NewGlobalRef(weak_this);
        lpCallbackData->busy = false;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioRecord: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioRecord in case a new session was created during set()
    nSession[0] = lpRecorder->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {
        const jint elements[1] = { (jint) lpRecorder->getSampleRate() };
        env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
    }

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioRecordCallBackCookies.add(lpCallbackData);
    }
    // save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
    // of the Java object
    setAudioRecord(env, thiz, lpRecorder);

    // save our newly created callback information in the "nativeCallbackCookie" field
    // of the Java object (in mNativeCallbackCookie) so we can free the memory in finalize()
    env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, (jlong)lpCallbackData);

    return (jint) AUDIO_JAVA_SUCCESS;

    // failure:
native_init_failure:
    env->DeleteGlobalRef(lpCallbackData->audioRecord_class);
    env->DeleteGlobalRef(lpCallbackData->audioRecord_ref);
    delete lpCallbackData;
    env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, 0);

    // lpRecorder goes out of scope, so reference count drops to zero
    return (jint) AUDIORECORD_ERROR_SETUP_NATIVEINITFAILED;
}
```

Retrieving the AudioRecord instance from the handle saved earlier is a very common pattern for JNI/Java interaction:

```cpp
static sp<AudioRecord> getAudioRecord(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    AudioRecord* const ar =
            (AudioRecord*)env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
    return sp<AudioRecord>(ar);
}
```
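The same save-a-pointer-in-a-long-field pattern, reduced to its skeleton on the Java side (a hypothetical sketch; NativePeerHolder and its methods are illustrative names):

```java
public class NativePeerHolder {
    // Written by native code via SetLongField(), read back via GetLongField(),
    // just like nativeRecorderInJavaObj in AudioRecord.java.
    private long mNativeHandle;

    private native long nativeCreate();            // returns the new C++ object's address
    private native void nativeRelease(long handle);

    public void init()    { mNativeHandle = nativeCreate(); }
    public void release() { nativeRelease(mNativeHandle); mNativeHandle = 0; }
}
```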
7.1 inChannelMaskToNative() converts the mask passed from the Java layer into the native value:
The mask passed in here is CHANNEL_IN_STEREO = 12.
```cpp
static inline audio_channel_mask_t inChannelMaskToNative(int channelMask)
{
    switch (channelMask) {
        case CHANNEL_IN_DEFAULT:
            return AUDIO_CHANNEL_NONE;
        default:
            return (audio_channel_mask_t)channelMask;
    }
}
```
7.2 The audioFormat passed from the Java layer is converted by audioFormatToNative():
When the Java side passes audioFormat = ENCODING_PCM_16BIT = 2, the converted value is AUDIO_FORMAT_PCM_16_BIT = 0x1u. You can see this in the logs: the Java side prints audioFormat as 2, but from this point on it is 0x1u, as shown by the "format 0x1" in the log line below.
```
2025-05-30 11:49:28.308 2330-2330/? V/AudioRecord: set(): inputSource 1, sampleRate 16000, format 0x1, channelMask 0x8000000f, frameCount 160, notificationFrames 0, sessionId 0, transferType 0, flags 0, opPackageName com.test.dummy uid -1, pid -1
```
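Another field of this log line is worth decoding. The channelMask 0x8000000f is not the stereo value 12: this capture went through the channel-index path, where audio_channel_mask_from_representation_and_bits() packs the representation tag (AUDIO_CHANNEL_REPRESENTATION_INDEX = 2, per AOSP's system/audio headers) into the bits above the 30 mask bits. A worked sketch of that packing:

```java
int representation = 2;    // AUDIO_CHANNEL_REPRESENTATION_INDEX (AOSP value)
int indexBits = 0xf;       // channels 0-3, i.e. MIC1+MIC2+REF1+REF2
int nativeMask = (representation << 30) | indexBits;
System.out.println(Integer.toHexString(nativeMask)); // prints "8000000f"
```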
```cpp
static inline audio_format_t audioFormatToNative(int audioFormat)
{
    switch (audioFormat) {
        case ENCODING_PCM_16BIT:
            return AUDIO_FORMAT_PCM_16_BIT;
        case ENCODING_PCM_8BIT:
            return AUDIO_FORMAT_PCM_8_BIT;
        case ENCODING_PCM_FLOAT:
            return AUDIO_FORMAT_PCM_FLOAT;
        case ENCODING_AC3:
            return AUDIO_FORMAT_AC3;
        case ENCODING_E_AC3:
            return AUDIO_FORMAT_E_AC3;
        case ENCODING_DTS:
            return AUDIO_FORMAT_DTS;
        case ENCODING_DTS_HD:
            return AUDIO_FORMAT_DTS_HD;
        case ENCODING_MP3:
            return AUDIO_FORMAT_MP3;
        case ENCODING_AAC_LC:
            return AUDIO_FORMAT_AAC_LC;
        case ENCODING_AAC_HE_V1:
            return AUDIO_FORMAT_AAC_HE_V1;
        case ENCODING_AAC_HE_V2:
            return AUDIO_FORMAT_AAC_HE_V2;
        case ENCODING_IEC61937:
            return AUDIO_FORMAT_IEC61937;
        case ENCODING_DOLBY_TRUEHD:
            return AUDIO_FORMAT_DOLBY_TRUEHD;
        case ENCODING_AAC_ELD:
            return AUDIO_FORMAT_AAC_ELD;
        case ENCODING_AAC_XHE:
            return AUDIO_FORMAT_AAC_XHE;
        case ENCODING_AC4:
            return AUDIO_FORMAT_AC4;
        case ENCODING_E_AC3_JOC:
            return AUDIO_FORMAT_E_AC3_JOC;
        case ENCODING_DEFAULT:
            return AUDIO_FORMAT_DEFAULT;
        case ENCODING_DOLBY_MAT:
            return AUDIO_FORMAT_MAT;
        case ENCODING_OPUS:
            return AUDIO_FORMAT_OPUS;
        default:
            return AUDIO_FORMAT_INVALID;
    }
}
```
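The frameCount in the log above also falls out of the arithmetic in android_media_AudioRecord_setup(). A worked sketch, assuming the same 4-channel 16-bit stream (the 1280-byte buffer is inferred from the log, not stated in it):

```java
int channelCount   = 4;    // index mask 0xf -> 4 channels
int bytesPerSample = 2;    // AUDIO_FORMAT_PCM_16_BIT -> 16 bits = 2 bytes
int frameSize  = channelCount * bytesPerSample; // 8 bytes per frame
int frameCount = 1280 / frameSize;              // 160, matching "frameCount 160" in the log
```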
Where are these constants defined?
```c
/* android11.0/frameworks/base/core/jni/android_media_AudioFormat.h */
#define ENCODING_PCM_16BIT      2
#define ENCODING_PCM_8BIT       3
#define ENCODING_PCM_FLOAT      4
#define ENCODING_AC3            5
#define ENCODING_E_AC3          6
#define ENCODING_DTS            7
#define ENCODING_DTS_HD         8
#define ENCODING_MP3            9
#define ENCODING_AAC_LC         10
#define ENCODING_AAC_HE_V1      11
#define ENCODING_AAC_HE_V2      12
#define ENCODING_IEC61937       13
#define ENCODING_DOLBY_TRUEHD   14
#define ENCODING_AAC_ELD        15
#define ENCODING_AAC_XHE        16
#define ENCODING_AC4            17
#define ENCODING_E_AC3_JOC      18
#define ENCODING_DOLBY_MAT      19
#define ENCODING_OPUS           20
```
After conversion, the corresponding native values are:
```c
/* android11.0/system/media/audio/include/system/audio-base.h */
    /* Aliases */
    AUDIO_FORMAT_PCM_16_BIT        = 0x1u,        // (PCM | PCM_SUB_16_BIT)
    AUDIO_FORMAT_PCM_8_BIT         = 0x2u,        // (PCM | PCM_SUB_8_BIT)
    AUDIO_FORMAT_PCM_32_BIT        = 0x3u,        // (PCM | PCM_SUB_32_BIT)
    AUDIO_FORMAT_PCM_8_24_BIT      = 0x4u,        // (PCM | PCM_SUB_8_24_BIT)
    AUDIO_FORMAT_PCM_FLOAT         = 0x5u,        // (PCM | PCM_SUB_FLOAT)
    AUDIO_FORMAT_PCM_24_BIT_PACKED = 0x6u,        // (PCM | PCM_SUB_24_BIT_PACKED)
    AUDIO_FORMAT_AAC_MAIN          = 0x4000001u,  // (AAC | AAC_SUB_MAIN)
    AUDIO_FORMAT_AAC_LC            = 0x4000002u,  // (AAC | AAC_SUB_LC)
    AUDIO_FORMAT_AAC_SSR           = 0x4000004u,  // (AAC | AAC_SUB_SSR)
    AUDIO_FORMAT_AAC_LTP           = 0x4000008u,  // (AAC | AAC_SUB_LTP)
    AUDIO_FORMAT_AAC_HE_V1         = 0x4000010u,  // (AAC | AAC_SUB_HE_V1)
    AUDIO_FORMAT_AAC_SCALABLE      = 0x4000020u,  // (AAC | AAC_SUB_SCALABLE)
    AUDIO_FORMAT_AAC_ERLC          = 0x4000040u,  // (AAC | AAC_SUB_ERLC)
    AUDIO_FORMAT_AAC_LD            = 0x4000080u,  // (AAC | AAC_SUB_LD)
    AUDIO_FORMAT_AAC_HE_V2         = 0x4000100u,  // (AAC | AAC_SUB_HE_V2)
    AUDIO_FORMAT_AAC_ELD           = 0x4000200u,  // (AAC | AAC_SUB_ELD)
    AUDIO_FORMAT_AAC_XHE           = 0x4000300u,  // (AAC | AAC_SUB_XHE)
    AUDIO_FORMAT_AAC_ADTS_MAIN     = 0x1e000001u, // (AAC_ADTS | AAC_SUB_MAIN)
    AUDIO_FORMAT_AAC_ADTS_LC       = 0x1e000002u, // (AAC_ADTS | AAC_SUB_LC)
    AUDIO_FORMAT_AAC_ADTS_SSR      = 0x1e000004u, // (AAC_ADTS | AAC_SUB_SSR)
    AUDIO_FORMAT_AAC_ADTS_LTP      = 0x1e000008u, // (AAC_ADTS | AAC_SUB_LTP)
    AUDIO_FORMAT_AAC_ADTS_HE_V1    = 0x1e000010u, // (AAC_ADTS | AAC_SUB_HE_V1)
    AUDIO_FORMAT_AAC_ADTS_SCALABLE = 0x1e000020u, // (AAC_ADTS | AAC_SUB_SCALABLE)
    AUDIO_FORMAT_AAC_ADTS_ERLC     = 0x1e000040u, // (AAC_ADTS | AAC_SUB_ERLC)
    AUDIO_FORMAT_AAC_ADTS_LD       = 0x1e000080u, // (AAC_ADTS | AAC_SUB_LD)
    AUDIO_FORMAT_AAC_ADTS_HE_V2    = 0x1e000100u, // (AAC_ADTS | AAC_SUB_HE_V2)
    AUDIO_FORMAT_AAC_ADTS_ELD      = 0x1e000200u, // (AAC_ADTS | AAC_SUB_ELD)
    AUDIO_FORMAT_AAC_ADTS_XHE      = 0x1e000300u, // (AAC_ADTS | AAC_SUB_XHE)
    AUDIO_FORMAT_AAC_LATM_LC       = 0x25000002u, // (AAC_LATM | AAC_SUB_LC)
    AUDIO_FORMAT_AAC_LATM_HE_V1    = 0x25000010u, // (AAC_LATM | AAC_SUB_HE_V1)
    AUDIO_FORMAT_AAC_LATM_HE_V2    = 0x25000100u, // (AAC_LATM | AAC_SUB_HE_V2)
    AUDIO_FORMAT_E_AC3_JOC         = 0xA000001u,  // (E_AC3 | E_AC3_SUB_JOC)
    AUDIO_FORMAT_MAT_1_0           = 0x24000001u, // (MAT | MAT_SUB_1_0)
    AUDIO_FORMAT_MAT_2_0           = 0x24000002u, // (MAT | MAT_SUB_2_0)
    AUDIO_FORMAT_MAT_2_1           = 0x24000003u, // (MAT | MAT_SUB_2_1)
} audio_format_t;
```
At this point, the flow from creating an AudioRecord in the Java layer through initialization in the JNI layer is complete. Stay tuned for the follow-up analysis.
"Focused on AAOS architecture and hands-on practice. Follow along and explore automotive development together."