diff --git a/frameworks/js/napi/audio_common/include/audio_common_napi.h b/frameworks/js/napi/audio_common/include/audio_common_napi.h index 3bab65cb27f9bf847ba93f379c2e7637d33c7000..bcce9f5b99475a11976e419736e2c84ccea5c6e5 100644 --- a/frameworks/js/napi/audio_common/include/audio_common_napi.h +++ b/frameworks/js/napi/audio_common/include/audio_common_napi.h @@ -70,6 +70,9 @@ public: static bool IsLegalInputArgumentActiveDeviceType(int32_t deviceType); static bool IsLegalInputArgumentCommunicationDeviceType(int32_t deviceType); static bool IsLegalInputArgumentRingMode(int32_t ringerMode); + static bool IsLegalInputArgumentContentType(int32_t contentType); + static bool IsLegalInputArgumentStreamUsage(int32_t streamUsage); + static bool IsLegalInputArgumentAudioEffectMode(int32_t audioEffectMode); static AudioVolumeType GetNativeAudioVolumeType(int32_t volumeType); private: static constexpr int32_t MAX_VOLUME_LEVEL = 15; diff --git a/frameworks/js/napi/audio_common/src/audio_common_napi.cpp b/frameworks/js/napi/audio_common/src/audio_common_napi.cpp index 2078f2efbeb1240257954c29c1ddb467e9b4692a..8aeac13d23f6219d3018c39fb389e3ae33487b67 100644 --- a/frameworks/js/napi/audio_common/src/audio_common_napi.cpp +++ b/frameworks/js/napi/audio_common/src/audio_common_napi.cpp @@ -15,6 +15,7 @@ #include "audio_common_napi.h" #include "audio_log.h" +#include "audio_info.h" #include "audio_manager_napi.h" namespace OHOS { @@ -177,5 +178,62 @@ bool AudioCommonNapi::IsLegalInputArgumentRingMode(int32_t ringerMode) return result; } +bool AudioCommonNapi::IsLegalInputArgumentContentType(int32_t contentType) +{ + bool result = false; + switch (contentType) { + case CONTENT_TYPE_UNKNOWN: + case CONTENT_TYPE_SPEECH: + case CONTENT_TYPE_MUSIC: + case CONTENT_TYPE_MOVIE: + case CONTENT_TYPE_SONIFICATION: + case CONTENT_TYPE_RINGTONE: + case CONTENT_TYPE_ULTRASONIC: + result = true; + break; + default: + result = false; + break; + } + return result; +} + +bool AudioCommonNapi::IsLegalInputArgumentStreamUsage(int32_t streamUsage) +{ + bool result = false; + switch (streamUsage) { + case STREAM_USAGE_UNKNOWN: + case STREAM_USAGE_MEDIA: + case STREAM_USAGE_VOICE_COMMUNICATION: + case STREAM_USAGE_VOICE_ASSISTANT: + case STREAM_USAGE_ALARM: + case STREAM_USAGE_NOTIFICATION_RINGTONE: + case STREAM_USAGE_RANGING: + case STREAM_USAGE_ACCESSIBILITY: + case STREAM_USAGE_SYSTEM: + case STREAM_USAGE_VOICE_MODEM_COMMUNICATION: + result = true; + break; + default: + result = false; + break; + } + return result; +} + +bool AudioCommonNapi::IsLegalInputArgumentAudioEffectMode(int32_t audioEffectMode) +{ + bool result = false; + switch (audioEffectMode) { + case AudioEffectMode::EFFECT_NONE: + case AudioEffectMode::EFFECT_DEFAULT: + result = true; + break; + default: + result = false; + break; + } + return result; +} } // namespace AudioStandard } // namespace OHOS \ No newline at end of file diff --git a/frameworks/js/napi/audio_manager/src/audio_stream_mgr_napi.cpp b/frameworks/js/napi/audio_manager/src/audio_stream_mgr_napi.cpp index 5f155d8f492d5c29887391b7adbd5b5c0ac6ee5e..efb8b6d8733b1d16620b7d62c8d2d61ace4cbde1 100644 --- a/frameworks/js/napi/audio_manager/src/audio_stream_mgr_napi.cpp +++ b/frameworks/js/napi/audio_manager/src/audio_stream_mgr_napi.cpp @@ -37,6 +37,7 @@ namespace { const int ARGS_ONE = 1; const int ARGS_TWO = 2; + const int ARGS_THREE = 3; const int PARAM0 = 0; const int PARAM1 = 1; @@ -53,22 +54,6 @@ namespace { napi_get_cb_info(env, info, &argc, argv, &thisVar, &data) } -struct 
AudioStreamMgrAsyncContext { - napi_env env; - napi_async_work work; - napi_deferred deferred; - napi_ref callbackRef = nullptr; - int32_t status = SUCCESS; - int32_t volType; - bool isTrue; - bool isLowLatencySupported; - bool isActive; - AudioStreamInfo audioStreamInfo; - AudioStreamMgrNapi *objectInfo; - vector> audioRendererChangeInfos; - vector> audioCapturerChangeInfos; -}; - AudioStreamMgrNapi::AudioStreamMgrNapi() : env_(nullptr), audioStreamMngr_(nullptr) {} @@ -167,7 +152,7 @@ static void SetDeviceDescriptors(const napi_env& env, napi_value &jsChangeInfoOb napi_set_named_property(env, jsChangeInfoObj, "deviceDescriptors", jsDeviceDescriptorsObj); } -static void GetCurrentRendererChangeInfosCallbackComplete(napi_env env, napi_status status, void *data) +void AudioStreamMgrNapi::GetCurrentRendererChangeInfosCallbackComplete(napi_env env, napi_status status, void *data) { auto asyncContext = static_cast(data); napi_value result[ARGS_TWO] = {0}; @@ -216,7 +201,7 @@ static void GetCurrentRendererChangeInfosCallbackComplete(napi_env env, napi_sta delete asyncContext; } -static void GetCurrentCapturerChangeInfosCallbackComplete(napi_env env, napi_status status, void *data) +void AudioStreamMgrNapi::GetCurrentCapturerChangeInfosCallbackComplete(napi_env env, napi_status status, void *data) { auto asyncContext = static_cast(data); napi_value result[ARGS_TWO] = {0}; @@ -280,7 +265,7 @@ napi_value AudioStreamMgrNapi::Init(napi_env env, napi_value exports) DECLARE_NAPI_FUNCTION("getCurrentAudioCapturerInfoArray", GetCurrentAudioCapturerInfos), DECLARE_NAPI_FUNCTION("isAudioRendererLowLatencySupported", IsAudioRendererLowLatencySupported), DECLARE_NAPI_FUNCTION("isActive", IsStreamActive), - + DECLARE_NAPI_FUNCTION("getAudioEffectInfoArray", GetEffectInfoArray), }; status = napi_define_class(env, AUDIO_STREAM_MGR_NAPI_CLASS_NAME.c_str(), NAPI_AUTO_LENGTH, Construct, nullptr, @@ -740,7 +725,7 @@ bool AudioStreamMgrNapi::ParseAudioStreamInfo(napi_env env, napi_value root, Aud return true; } -static void CommonCallbackRoutine(napi_env env, AudioStreamMgrAsyncContext* &asyncContext, +void AudioStreamMgrNapi::CommonCallbackRoutine(napi_env env, AudioStreamMgrAsyncContext* &asyncContext, const napi_value &valueParam) { napi_value result[ARGS_TWO] = {0}; @@ -794,8 +779,7 @@ void AudioStreamMgrNapi::IsLowLatencySupportedCallback(napi_env env, napi_status } } - -static void IsTrueAsyncCallbackComplete(napi_env env, napi_status status, void *data) +void AudioStreamMgrNapi::IsTrueAsyncCallbackComplete(napi_env env, napi_status status, void *data) { auto asyncContext = static_cast(data); napi_value valueParam = nullptr; @@ -880,5 +864,119 @@ napi_value AudioStreamMgrNapi::IsStreamActive(napi_env env, napi_callback_info i return result; } + +void AudioStreamMgrNapi::GetEffectInfoArrayCallbackComplete(napi_env env, napi_status status, void *data) +{ + uint32_t i; + auto asyncContext = static_cast(data); + napi_value result[ARGS_TWO] = {0}; + napi_value jsEffectInofObj = nullptr; + napi_value retVal; + if (!asyncContext->status) { + napi_create_array_with_length(env, asyncContext->audioSceneEffectInfo.mode.size(), &result[PARAM1]); + napi_create_object(env, &jsEffectInofObj); + for (i = 0; i < asyncContext->audioSceneEffectInfo.mode.size(); i++) { + napi_create_uint32(env, asyncContext->audioSceneEffectInfo.mode[i], &jsEffectInofObj); + napi_set_element(env, result[PARAM1], i, jsEffectInofObj); + } + napi_get_undefined(env, &result[PARAM0]); + } else { + napi_value message = nullptr; + std::string 
messageValue = AudioCommonNapi::getMessageByCode(asyncContext->status); + napi_create_string_utf8(env, messageValue.c_str(), NAPI_AUTO_LENGTH, &message); + napi_value code = nullptr; + napi_create_string_utf8(env, (std::to_string(asyncContext->status)).c_str(), NAPI_AUTO_LENGTH, &code); + napi_create_error(env, code, message, &result[PARAM0]); + napi_get_undefined(env, &result[PARAM1]); + } + if (asyncContext->deferred) { + if (!asyncContext->status) { + napi_resolve_deferred(env, asyncContext->deferred, result[PARAM1]); + } else { + napi_reject_deferred(env, asyncContext->deferred, result[PARAM0]); + } + } else { + napi_value callback = nullptr; + napi_get_reference_value(env, asyncContext->callbackRef, &callback); + napi_call_function(env, nullptr, callback, ARGS_TWO, result, &retVal); + napi_delete_reference(env, asyncContext->callbackRef); + } + napi_delete_async_work(env, asyncContext->work); + delete asyncContext; +} + +napi_value AudioStreamMgrNapi::GetEffectInfoArray(napi_env env, napi_callback_info info) +{ + napi_status status; + const int32_t refCount = 1; + napi_value result = nullptr; + GET_PARAMS(env, info, ARGS_THREE); + unique_ptr asyncContext = make_unique(); + + if (!asyncContext) { + AUDIO_ERR_LOG("AudioStreamMgrNapi:Audio manager async context failed"); + return result; + } + status = napi_unwrap(env, thisVar, reinterpret_cast(&asyncContext->objectInfo)); + if ((status == napi_ok && asyncContext->objectInfo != nullptr) + && (asyncContext->objectInfo->audioStreamMngr_ != nullptr)) { + for (size_t i = PARAM0; i < argc; i++) { + napi_valuetype valueType = napi_undefined; + napi_typeof(env, argv[i], &valueType); + if (i == PARAM0 && valueType == napi_number) { + napi_get_value_int32(env, argv[i], &asyncContext->contentType); + if (!AudioCommonNapi::IsLegalInputArgumentContentType(asyncContext->contentType)) { + asyncContext->status = (asyncContext->status == + NAPI_ERR_INVALID_PARAM) ? NAPI_ERR_INVALID_PARAM : NAPI_ERR_UNSUPPORTED; + } + } else if (i == PARAM1 && valueType == napi_number) { + napi_get_value_int32(env, argv[i], &asyncContext->streamUsage); + if (!AudioCommonNapi::IsLegalInputArgumentStreamUsage(asyncContext->streamUsage)) { + asyncContext->status = (asyncContext->status == + NAPI_ERR_INVALID_PARAM) ? 
NAPI_ERR_INVALID_PARAM : NAPI_ERR_UNSUPPORTED; + } + } else if (i == PARAM2) { + if (valueType == napi_function) { + napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef); + } + break; + } else { + asyncContext->status = NAPI_ERR_INVALID_PARAM; + } + } + + if (asyncContext->callbackRef == nullptr) { + napi_create_promise(env, &asyncContext->deferred, &result); + } else { + napi_get_undefined(env, &result); + } + + napi_value resource = nullptr; + napi_create_string_utf8(env, "getEffectInfoArray", NAPI_AUTO_LENGTH, &resource); + status = napi_create_async_work( + env, nullptr, resource, + [](napi_env env, void *data) { + auto context = static_cast(data); + if (context->status == SUCCESS) { + ContentType contentType = static_cast(context->contentType); + StreamUsage streamUsage = static_cast(context->streamUsage); + context->status = context->objectInfo->audioStreamMngr_->GetEffectInfoArray(context->audioSceneEffectInfo, + contentType, streamUsage); + } + }, + GetEffectInfoArrayCallbackComplete, static_cast(asyncContext.get()), &asyncContext->work); + if (status != napi_ok) { + result = nullptr; + } else { + status = napi_queue_async_work(env, asyncContext->work); + if (status == napi_ok) { + asyncContext.release(); + } else { + result = nullptr; + } + } + } + return result; +} } // namespace AudioStandard } // namespace OHOS diff --git a/frameworks/js/napi/audio_renderer/src/audio_renderer_napi.cpp b/frameworks/js/napi/audio_renderer/src/audio_renderer_napi.cpp index 2ba9380e4f1d9fb0543799c70fb231a100ffe9d6..3270cc750c1eba870b343cb50a0102040a5ec4dc 100644 --- a/frameworks/js/napi/audio_renderer/src/audio_renderer_napi.cpp +++ b/frameworks/js/napi/audio_renderer/src/audio_renderer_napi.cpp @@ -47,6 +47,7 @@ napi_ref AudioRendererNapi::interruptHintType_ = nullptr; napi_ref AudioRendererNapi::interruptForceType_ = nullptr; napi_ref AudioRendererNapi::audioState_ = nullptr; napi_ref AudioRendererNapi::sampleFormat_ = nullptr; +napi_ref AudioRendererNapi::audioEffectMode_ = nullptr; mutex AudioRendererNapi::createMutex_; int32_t AudioRendererNapi::isConstructSuccess_ = SUCCESS; @@ -343,6 +344,36 @@ napi_value AudioRendererNapi::CreateInterruptHintTypeObject(napi_env env) return result; } +napi_value AudioRendererNapi::CreateAudioEffectModeObject(napi_env env) +{ + napi_value result = nullptr; + napi_status status; + std::string propName; + + status = napi_create_object(env, &result); + if (status == napi_ok) { + for (auto &iter: effectModeMap) { + propName = iter.first; + status = AddNamedProperty(env, result, propName, iter.second); + if (status != napi_ok) { + HiLog::Error(LABEL, "Failed to add named prop!"); + break; + } + propName.clear(); + } + if (status == napi_ok) { + status = napi_create_reference(env, result, REFERENCE_CREATION_COUNT, &audioEffectMode_); + if (status == napi_ok) { + return result; + } + } + } + HiLog::Error(LABEL, "CreateAudioEffectModeObject is Failed!"); + napi_get_undefined(env, &result); + + return result; +} + static void SetDeviceDescriptors(const napi_env& env, napi_value &valueParam, const DeviceInfo &deviceInfo) { SetValueInt32(env, "deviceRole", static_cast(deviceInfo.deviceRole), valueParam); @@ -407,6 +438,8 @@ napi_value AudioRendererNapi::Init(napi_env env, napi_value exports) DECLARE_NAPI_FUNCTION("getMaxStreamVolume", GetMaxStreamVolume), DECLARE_NAPI_FUNCTION("getCurrentOutputDevices", GetCurrentOutputDevices), DECLARE_NAPI_FUNCTION("getUnderflowCount", GetUnderflowCount), + DECLARE_NAPI_FUNCTION("getAudioEffectMode", 
GetAudioEffectMode), + DECLARE_NAPI_FUNCTION("setAudioEffectMode", SetAudioEffectMode), DECLARE_NAPI_GETTER("state", GetState) }; @@ -418,6 +451,7 @@ napi_value AudioRendererNapi::Init(napi_env env, napi_value exports) DECLARE_NAPI_PROPERTY("InterruptHint", CreateInterruptHintTypeObject(env)), DECLARE_NAPI_PROPERTY("AudioState", CreateAudioStateObject(env)), DECLARE_NAPI_PROPERTY("AudioSampleFormat", CreateAudioSampleFormatObject(env)), + DECLARE_NAPI_PROPERTY("AudioEffectMode", CreateAudioEffectModeObject(env)), }; status = napi_define_class(env, AUDIO_RENDERER_NAPI_CLASS_NAME.c_str(), NAPI_AUTO_LENGTH, Construct, nullptr, @@ -2101,6 +2135,139 @@ napi_value AudioRendererNapi::GetStreamInfo(napi_env env, napi_callback_info inf return result; } +napi_value AudioRendererNapi::GetAudioEffectMode(napi_env env, napi_callback_info info) +{ + napi_status status; + const int32_t refCount = 1; + napi_value result = nullptr; + + GET_PARAMS(env, info, ARGS_ONE); + + unique_ptr asyncContext = make_unique(); + status = napi_unwrap(env, thisVar, reinterpret_cast(&asyncContext->objectInfo)); + if (status == napi_ok && asyncContext->objectInfo != nullptr) { + if (argc > PARAM0) { + napi_valuetype valueType = napi_undefined; + napi_typeof(env, argv[PARAM0], &valueType); + if (valueType == napi_function) { + napi_create_reference(env, argv[PARAM0], refCount, &asyncContext->callbackRef); + } + } + + if (asyncContext->callbackRef == nullptr) { + napi_create_promise(env, &asyncContext->deferred, &result); + } else { + napi_get_undefined(env, &result); + } + + napi_value resource = nullptr; + napi_create_string_utf8(env, "GetAudioEffectMode", NAPI_AUTO_LENGTH, &resource); + + status = napi_create_async_work( + env, nullptr, resource, + [](napi_env env, void *data) { + auto context = static_cast(data); + if (!CheckContextStatus(context)) { + return; + } + context->intValue = context->objectInfo->audioRenderer_->GetAudioEffectMode(); + context->status = SUCCESS; + }, + GetIntValueAsyncCallbackComplete, static_cast(asyncContext.get()), &asyncContext->work); + if (status != napi_ok) { + result = nullptr; + } else { + status = napi_queue_async_work(env, asyncContext->work); + if (status == napi_ok) { + asyncContext.release(); + } else { + result = nullptr; + } + } + } + + return result; +} + +napi_value AudioRendererNapi::SetAudioEffectMode(napi_env env, napi_callback_info info) +{ + napi_status status; + const int32_t refCount = 1; + napi_value result = nullptr; + + GET_PARAMS(env, info, ARGS_TWO); + unique_ptr asyncContext = make_unique(); + if (argc < ARGS_ONE) { + asyncContext->status = NAPI_ERR_INVALID_PARAM; + } + + status = napi_unwrap(env, thisVar, reinterpret_cast(&asyncContext->objectInfo)); + if (status == napi_ok && asyncContext->objectInfo != nullptr) { + for (size_t i = PARAM0; i < argc; i++) { + napi_valuetype valueType = napi_undefined; + napi_typeof(env, argv[i], &valueType); + + if (i == PARAM0 && valueType == napi_number) { + napi_get_value_int32(env, argv[PARAM0], &asyncContext->audioEffectMode); + if (!AudioCommonNapi::IsLegalInputArgumentAudioEffectMode(asyncContext->audioEffectMode)) { + asyncContext->status = asyncContext->status == + NAPI_ERR_INVALID_PARAM ? 
NAPI_ERR_INVALID_PARAM : NAPI_ERR_UNSUPPORTED; + } + } else if (i == PARAM1) { + if (valueType == napi_function) { + napi_create_reference(env, argv[i], refCount, &asyncContext->callbackRef); + } + break; + } else { + asyncContext->status = NAPI_ERR_INVALID_PARAM; + } + } + + if (asyncContext->callbackRef == nullptr) { + napi_create_promise(env, &asyncContext->deferred, &result); + } else { + napi_get_undefined(env, &result); + } + + napi_value resource = nullptr; + napi_create_string_utf8(env, "SetAudioEffectMode", NAPI_AUTO_LENGTH, &resource); + + status = napi_create_async_work( + env, nullptr, resource, + [](napi_env env, void *data) { + auto context = static_cast(data); + if (!CheckContextStatus(context)) { + return; + } + if (context->status == SUCCESS) { + AudioEffectMode audioEffectMode = static_cast(context->audioEffectMode); + int32_t audioClientInvalidParamsErr = -2; + context->intValue = context->objectInfo->audioRenderer_->SetAudioEffectMode(audioEffectMode); + if (context->intValue == SUCCESS) { + context->status = SUCCESS; + } else if (context->intValue == audioClientInvalidParamsErr) { + context->status = NAPI_ERR_UNSUPPORTED; + } else { + context->status = NAPI_ERR_SYSTEM; + } + } + }, + VoidAsyncCallbackComplete, static_cast(asyncContext.get()), &asyncContext->work); + if (status != napi_ok) { + result = nullptr; + } else { + status = napi_queue_async_work(env, asyncContext->work); + if (status == napi_ok) { + asyncContext.release(); + } else { + result = nullptr; + } + } + } + + return result; +} + napi_value AudioRendererNapi::GetState(napi_env env, napi_callback_info info) { napi_value jsThis = nullptr; diff --git a/frameworks/native/audioeffect/BUILD.gn b/frameworks/native/audioeffect/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..ff7868791e783e912d00fac8eb239a9e5d06700d --- /dev/null +++ b/frameworks/native/audioeffect/BUILD.gn @@ -0,0 +1,75 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import("//build/ohos.gni") +import("//foundation/multimedia/audio_framework/audio_ohcore.gni") +import("//foundation/multimedia/audio_framework/config.gni") + +pulseaudio_dir = "//third_party/pulseaudio" + +config("audio_effect_config") { + include_dirs = [ + "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiorenderer/include", + "//foundation/multimedia/audio_framework/frameworks/native/audioeffect/include", + "//foundation/multimedia/audio_framework/frameworks/native/audiostream/include", + "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiocommon/include", + "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiomanager/include", + "//foundation/multimedia/audio_framework/services/audio_service/client/include", + "//foundation/multimedia/audio_framework/services/audio_service/test/example", + "//commonlibrary/c_utils/base/include", + "$pulseaudio_dir/src", + "$pulseaudio_dir/confgure/src", + ] + + cflags = [ + "-Wall", + "-Werror", + ] +} + +ohos_shared_library("audio_effect") { + sanitize = { + cfi = true + debug = false + blocklist = "//foundation/multimedia/audio_framework/cfi_blocklist.txt" + } + install_enable = true + + configs = [ ":audio_effect_config" ] + + if ("${product_name}" == "ohcore") { + defines = [ "OHCORE" ] + } + + # include_dirs = audio_gateway_include_dirs + # include_dirs += [ multimedia_audio_framework_pulse_audio ] + + sources = [ + "src/audio_effect_chain_adapter.c", + "src/audio_effect_chain_manager.cpp", + ] + +# public_configs = [ ":audio_external_library_config" ] + + external_deps = [ + "c_utils:utils", + "hiviewdfx_hilog_native:libhilog", + "ipc:ipc_single", + ] + + version_script = "../../../audio_framework.versionscript" + innerapi_tags = [ "platformsdk" ] + + part_name = "audio_framework" + subsystem_name = "multimedia" +} \ No newline at end of file diff --git a/frameworks/native/audioeffect/include/audio_effect_chain_adapter.h b/frameworks/native/audioeffect/include/audio_effect_chain_adapter.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f45e1792c6a6fe788c55a86fe2ad23d13867ee --- /dev/null +++ b/frameworks/native/audioeffect/include/audio_effect_chain_adapter.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021-2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef AUDIO_EFFECT_CHAIN_ADAPTER_H +#define AUDIO_EFFECT_CHAIN_ADAPTER_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct EffectChainAdapter { + void *wapper; // AudioEffectChainManager instance + void *bufIn; // input buffer, output of the effect sink + void *bufOut; // output buffer for the final processed output +}; + +int32_t LoadEffectChainAdapter(struct EffectChainAdapter *adapter); +int32_t UnLoadEffectChainAdapter(struct EffectChainAdapter *adapter); + +// functions for cpp +int32_t FillinEffectChainWapper(struct EffectChainAdapter *adapter); +int32_t EffectChainManagerProcess(struct EffectChainAdapter *adapter, char *streamType); +int32_t EffectChainManagerGetFrameLen(struct EffectChainAdapter *adapter); +int32_t EffectChainManagerReturnValue(struct EffectChainAdapter *adapter, int32_t i); + +#ifdef __cplusplus +} +#endif +#endif // AUDIO_EFFECT_CHAIN_ADAPTER_H diff --git a/frameworks/native/audioeffect/include/audio_effect_chain_manager.h b/frameworks/native/audioeffect/include/audio_effect_chain_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..8f704a5d48067dc00c55c3dd3d9a3567de3451b4 --- /dev/null +++ b/frameworks/native/audioeffect/include/audio_effect_chain_manager.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+#ifndef AUDIO_EFFECT_CHAIN_MANAGER_H
+#define AUDIO_EFFECT_CHAIN_MANAGER_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "audio_info.h"
+#include "audio_effect.h"
+
+
+namespace OHOS {
+namespace AudioStandard {
+class AudioEffectChain {
+private:
+    std::string sceneType;
+    std::vector<EffectHandleT *> standByEffectHandles;
+
+public:
+    AudioEffectChain(std::string scene);
+
+    ~AudioEffectChain();
+
+    // Set the list of standby effect handles that make up this chain
+    void SetEffectChain(std::vector<EffectHandleT *> effectHandles);
+
+    // Run each handle in standByEffectHandles in order
+    void ApplyEffectChain(void *bufIn, void *bufOut);
+};
+
+class AudioEffectChainManager {
+private:
+    std::map<std::string, LibEntryT *> EffectToLibraryEntryMap; // {"hvs": libEntryT}
+    std::map<std::string, std::vector<std::string>> EffectChainToEffectsMap; // {"EFFECTCHAIN_SPK_MUSIC": [hvs, eq, histen]}
+    std::map<std::string, AudioEffectChain *> SceneTypeToEffectChainMap; // {"STREAM_MUSIC": AudioEffectChain}; chains are created at init time
+    int32_t frameLen = 1024;
+
+public:
+    AudioEffectChainManager();
+    ~AudioEffectChainManager();
+    static AudioEffectChainManager *GetInstance();
+    // Initialize SceneTypeToEffectChainMap; each mode/device combination maps to exactly one effect chain
+    void InitAudioEffectChain(std::vector<EffectChain> effectChains, std::vector<std::unique_ptr<LibEntryT>> &effectLibraryList);
+    // Create the effect handles described by mdInfo and set them on the chain
+    int32_t SetAudioEffectChain(std::string sceneType, std::string effectChain);
+    int32_t ApplyAudioEffectChain(std::string sceneType, void *bufIn, void *bufOut);
+    int32_t GetFrameLen();
+    int32_t SetFrameLen(int32_t frameLen);
+    int32_t ReturnValue(int32_t i);
+};
+
+
+} // namespace AudioStandard
+} // namespace OHOS
+#endif // AUDIO_EFFECT_CHAIN_MANAGER_H
\ No newline at end of file
diff --git a/frameworks/native/audioeffect/src/audio_effect_chain_adapter.c b/frameworks/native/audioeffect/src/audio_effect_chain_adapter.c
new file mode 100644
index 0000000000000000000000000000000000000000..247e0789923be35ad906ccf5ba5544f536f6ac9c
--- /dev/null
+++ b/frameworks/native/audioeffect/src/audio_effect_chain_adapter.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+
+#include "audio_effect_chain_adapter.h"
+#include "audio_log.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+const int32_t SUCCESS = 0;
+const int32_t ERROR = -1;
+
+int32_t LoadEffectChainAdapter(struct EffectChainAdapter *adapter)
+{
+    if (adapter == NULL) {
+        AUDIO_ERR_LOG("%{public}s: Invalid parameter", __func__);
+        return ERROR;
+    }
+
+    if (FillinEffectChainWapper(adapter) != SUCCESS) {
+        AUDIO_ERR_LOG("%{public}s: Device not supported", __func__);
+        free(adapter);
+        return ERROR;
+    }
+
+    return SUCCESS;
+}
+
+int32_t UnLoadEffectChainAdapter(struct EffectChainAdapter *ecAdapter)
+{
+    if (ecAdapter == NULL) {
+        AUDIO_ERR_LOG("%{public}s: Invalid parameter", __func__);
+        return ERROR;
+    }
+
+    free(ecAdapter);
+
+    return SUCCESS;
+}
+
+#ifdef __cplusplus
+}
+#endif
\ No newline at end of file
diff --git a/frameworks/native/audioeffect/src/audio_effect_chain_manager.cpp b/frameworks/native/audioeffect/src/audio_effect_chain_manager.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1eaef4c318a7e410d58493afdae3c62580c34c7c
--- /dev/null
+++ b/frameworks/native/audioeffect/src/audio_effect_chain_manager.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "audio_effect_chain_adapter.h"
+#include "audio_effect_chain_manager.h"
+#include "audio_log.h"
+#include "audio_errors.h"
+#include "audio_info.h"
+
+using namespace OHOS::AudioStandard;
+
+int32_t FillinEffectChainWapper(struct EffectChainAdapter *adapter) {
+    CHECK_AND_RETURN_RET_LOG(adapter != nullptr, ERR_INVALID_HANDLE, "null EffectChainAdapter");
+    AudioEffectChainManager *instance = AudioEffectChainManager::GetInstance();
+    if (instance != nullptr) {
+        adapter->wapper = static_cast<void *>(instance);
+    } else {
+        adapter->wapper = nullptr;
+        return ERROR;
+    }
+    return SUCCESS;
+}
+
+int32_t EffectChainManagerProcess(struct EffectChainAdapter *adapter, char *sceneType) {
+    CHECK_AND_RETURN_RET_LOG(adapter != nullptr, ERR_INVALID_HANDLE, "null EffectChainAdapter");
+    AudioEffectChainManager *audioEffectChainManager = static_cast<AudioEffectChainManager *>(adapter->wapper);
+    CHECK_AND_RETURN_RET_LOG(audioEffectChainManager != nullptr, ERR_INVALID_HANDLE, "null audioEffectChainManager");
+    std::string sceneTypeString(sceneType);
+    if (audioEffectChainManager->ApplyAudioEffectChain(sceneTypeString, adapter->bufIn, adapter->bufOut) != SUCCESS) {
+        return ERROR;
+    }
+    return SUCCESS;
+}
+
+int32_t EffectChainManagerGetFrameLen(struct EffectChainAdapter *adapter)
+{
+    CHECK_AND_RETURN_RET_LOG(adapter != nullptr, ERR_INVALID_HANDLE, "null EffectChainAdapter");
+    AudioEffectChainManager *audioEffectChainManager = static_cast<AudioEffectChainManager *>(adapter->wapper);
+    CHECK_AND_RETURN_RET_LOG(audioEffectChainManager != nullptr, ERR_INVALID_HANDLE, "null audioEffectChainManager");
+    return audioEffectChainManager->GetFrameLen();
+}
+
+int32_t EffectChainManagerReturnValue(struct EffectChainAdapter *adapter, int32_t i)
+{
+    AUDIO_INFO_LOG("xjl: AdapterReturnValue start, value=%{public}d", i);
+    AudioEffectChainManager *audioEffectChainManager = static_cast<AudioEffectChainManager *>(adapter->wapper);
+    int j = audioEffectChainManager->ReturnValue(i);
+    AUDIO_INFO_LOG("xjl: AdapterReturnValue end, value=%{public}d", i);
+    return j;
+}
+
+namespace OHOS {
+namespace AudioStandard {
+AudioEffectChain::AudioEffectChain(std::string scene) {
+    sceneType = scene;
+}
+
+AudioEffectChain::~AudioEffectChain() {}
+
+void AudioEffectChain::SetEffectChain(std::vector<EffectHandleT *> effectHandles) {
+    standByEffectHandles.clear();
+    for (EffectHandleT *handleT : effectHandles) {
+        standByEffectHandles.emplace_back(handleT);
+    }
+}
+
+void AudioEffectChain::ApplyEffectChain(void *bufIn, void *bufOut) {
+// AUDIO_ERR_LOG(" could not find library %{public}s to load effect %{public}s",
+// effect.libraryName.c_str(), effect.name.c_str());
+
+    AUDIO_INFO_LOG("apply effect chain beginning");
+    for (EffectHandleT *handle : standByEffectHandles) {
+        AUDIO_INFO_LOG("run effect: %{public}p", handle);
+// handle(bufIn, bufOut));
+    }
+}
+
+LibEntryT *findlibOfEffect(std::string effect, std::vector<std::unique_ptr<LibEntryT>> &effectLibraryList) {
+    for (const std::unique_ptr<LibEntryT> &lib : effectLibraryList) {
+        for (auto &e : lib->effects) {
+            if (e->name == effect) {
+                return lib.get();
+            }
+        }
+    }
+    return nullptr;
+}
+
+int32_t AudioEffectChainManager::ReturnValue(int32_t i)
+{
+    AUDIO_INFO_LOG("xyq: come into AudioEffectChainManager::ReturnValue, value=%{public}d", i);
+    return i;
+}
+
+int32_t AudioEffectChainManager::SetFrameLen(int32_t frameLength)
+{
+    frameLen = frameLength;
+    return SUCCESS;
+}
+
+int32_t AudioEffectChainManager::GetFrameLen()
+{
+    return frameLen;
+}
+
+void AudioEffectChainManager::InitAudioEffectChain(std::vector<EffectChain> effectChains,
+    std::vector<std::unique_ptr<LibEntryT>> &effectLibraryList) {
+
+    AUDIO_INFO_LOG("xjl: init audio effect chain in AudioEffectChainManager step1");
+    std::set<std::string> effectSet;
+    for (EffectChain efc : effectChains) {
+        for (std::string effect : efc.apply) {
+            effectSet.insert(effect);
+        }
+    }
+
+    AUDIO_INFO_LOG("xjl: init audio effect chain in AudioEffectChainManager step2");
+    // make EffectToLibraryEntryMap
+    for (std::string effect : effectSet) {
+        auto *libEntry = findlibOfEffect(effect, effectLibraryList);
+        if (!libEntry) {
+// std::cout << "libEntry is nil while find effect:" << effect.name << std::endl;
+        }
+
+        EffectToLibraryEntryMap[effect] = libEntry;
+    }
+
+    AUDIO_INFO_LOG("xjl: init audio effect chain in AudioEffectChainManager step3");
+    // make EffectChainToEffectsMap
+    for (EffectChain efc : effectChains) {
+        std::string key = efc.name;
+        std::vector<std::string> effects;
+        for (std::string effectName : efc.apply) {
+            effects.emplace_back(effectName);
+        }
+        EffectChainToEffectsMap[key] = effects;
+    }
+    AUDIO_INFO_LOG("xjl: init audio effect chain in AudioEffectChainManager step4");
+
+    SetAudioEffectChain("SCENE_MUSIC", "default");
+    SetAudioEffectChain("SCENE_MOVIE", "default");
+    SetAudioEffectChain("SCENE_GAME", "default");
+    SetAudioEffectChain("SCENE_SPEECH", "default");
+    SetAudioEffectChain("SCENE_RING", "default");
+    SetAudioEffectChain("SCENE_OTHERS", "default");
+
+    AUDIO_INFO_LOG("xjl: EffectToLibraryEntryMap size %{public}zu", EffectToLibraryEntryMap.size());
+    AUDIO_INFO_LOG("xjl: EffectChainToEffectsMap size %{public}zu", EffectChainToEffectsMap.size());
+    AUDIO_INFO_LOG("xjl: SceneTypeToEffectChainMap size %{public}zu", SceneTypeToEffectChainMap.size());
+}
+
+int32_t AudioEffectChainManager::SetAudioEffectChain(std::string sceneType, std::string effectChain) {
+    AudioEffectChain *audioEffectChain = new AudioEffectChain(sceneType);
+    SceneTypeToEffectChainMap[sceneType] = audioEffectChain;
+
+    std::vector<std::string> effectNames = EffectChainToEffectsMap[effectChain];
+
+    std::vector<EffectHandleT *> effectHandles;
+// std::cout << "create effectchain:" << effectChain << std::endl;
+    for (std::string effect : effectNames) {
+        EffectHandleT handle;
+        EffectToLibraryEntryMap[effect]->desc->CreateEffect(&effect, 0, 0, &handle);
+        effectHandles.emplace_back(&handle);
+    }
+
+    audioEffectChain->SetEffectChain(effectHandles);
+    return 0;
+}
+
+int32_t AudioEffectChainManager::ApplyAudioEffectChain(std::string sceneType, void *bufIn, void *bufOut) {
+    float *bufferIn = (float *)bufIn;
+    float *bufferOut = (float *)bufOut;
+    if (sceneType == "SCENE_MUSIC") {
+        for (int i = 0; i < frameLen * 2; i++) {
+            bufferOut[i] = bufferIn[i] * 3;
+        }
+    } else {
+        for (int i = 0; i < frameLen * 2; i++) {
+            bufferOut[i] = bufferIn[i] / 3;
+        }
+    }
+    AUDIO_INFO_LOG("xjl: ApplyAudioEffectChain running %{public}s", sceneType.c_str());
+    // auto *audioEffectChain = SceneTypeToEffectChainMap[sceneType];
+    // audioEffectChain->ApplyEffectChain(bufIn, bufOut);
+    return 0;
+}
+
+AudioEffectChainManager::AudioEffectChainManager() {}
+
+AudioEffectChainManager::~AudioEffectChainManager() {}
+
+AudioEffectChainManager *AudioEffectChainManager::GetInstance() {
+    static AudioEffectChainManager audioEffectChainManager;
+    return &audioEffectChainManager;
+}
+} // namespace AudioStandard
+} // namespace OHOS
\ No newline at end of file
diff --git a/frameworks/native/audiopolicy/include/audio_policy_manager.h b/frameworks/native/audiopolicy/include/audio_policy_manager.h
index 
2fd3805412de673a887db484b3c93c7fc8177e54..deea0db29e5f64898743ca07f36388f710d77dcc 100644 --- a/frameworks/native/audiopolicy/include/audio_policy_manager.h +++ b/frameworks/native/audiopolicy/include/audio_policy_manager.h @@ -20,6 +20,7 @@ #include "audio_capturer_state_change_listener_stub.h" #include "audio_client_tracker_callback_stub.h" #include "audio_info.h" +#include "audio_effect.h" #include "audio_interrupt_callback.h" #include "audio_policy_manager_listener_stub.h" #include "audio_renderer_state_change_listener_stub.h" @@ -210,6 +211,8 @@ public: int32_t RegisterAudioPolicyServerDiedCb(const int32_t clientPid, const std::weak_ptr &callback); int32_t UnregisterAudioPolicyServerDiedCb(const int32_t clientPid); + + int32_t QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig); private: AudioPolicyManager() {} ~AudioPolicyManager() {} diff --git a/frameworks/native/audiopolicy/test/unittest/manager_test/src/audio_manager_unit_test.cpp b/frameworks/native/audiopolicy/test/unittest/manager_test/src/audio_manager_unit_test.cpp index e4cb8499b46e41bec94197fd958fa6972c08c0b7..36903c07e75a21926e4484cf6f6805471cb7f8b4 100644 --- a/frameworks/native/audiopolicy/test/unittest/manager_test/src/audio_manager_unit_test.cpp +++ b/frameworks/native/audiopolicy/test/unittest/manager_test/src/audio_manager_unit_test.cpp @@ -2456,5 +2456,357 @@ HWTEST(AudioManagerUnitTest, RegisterFocusInfoChangeCallback_005, TestSize.Level ret = AudioSystemManager::GetInstance()->UnregisterFocusInfoChangeCallback(callback2); EXPECT_EQ(ret, SUCCESS); } + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_001 +* @tc.desc : Test GetAudioEffectInfoArray interface. +*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_001, TestSize.Level1) +{ + // STREAM_MUSIC + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo = {}; + ContentType contentType = CONTENT_TYPE_UNKNOWN; + StreamUsage streamUsage = STREAM_USAGE_UNKNOWN; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_UNKNOWN; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_UNKNOWN; + streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_UNKNOWN; + streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_UNKNOWN; + streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, 
streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SPEECH; + streamUsage = STREAM_USAGE_UNKNOWN; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SPEECH; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SPEECH; + streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MUSIC; + streamUsage = STREAM_USAGE_UNKNOWN; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MUSIC; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MUSIC; + streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MOVIE; + streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MOVIE; + streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MOVIE; + streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MOVIE; + streamUsage = STREAM_USAGE_UNKNOWN; + ret = 
AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MOVIE; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SONIFICATION; + streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SONIFICATION; + streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_RINGTONE; + streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_RINGTONE; + streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_002 +* @tc.desc : Test GetAudioEffectInfoArray interface. +*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_002, TestSize.Level1) +{ + // STREAM_ULTRASONIC + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_ULTRASONIC; + StreamUsage streamUsage = STREAM_USAGE_SYSTEM; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_003 +* @tc.desc : Test GetAudioEffectInfoArray interface. 
+*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_003, TestSize.Level1) +{ + // STREAM_RING + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_MUSIC; + StreamUsage streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_RINGTONE; + streamUsage = STREAM_USAGE_UNKNOWN; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_RINGTONE; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_RINGTONE; + streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SONIFICATION; + streamUsage = STREAM_USAGE_NOTIFICATION_RINGTONE; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_004 +* @tc.desc : Test GetAudioEffectInfoArray interface. +*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_004, TestSize.Level1) +{ + // STREAM_VOICE_ASSISTANT + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_SPEECH; + StreamUsage streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_MUSIC; + streamUsage = STREAM_USAGE_VOICE_ASSISTANT; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_005 +* @tc.desc : Test GetAudioEffectInfoArray interface. 
+*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_005, TestSize.Level1) +{ + // STREAM_VOICE_CALL + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_SPEECH; + StreamUsage streamUsage = STREAM_USAGE_VOICE_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SPEECH; + streamUsage = STREAM_USAGE_VOICE_MODEM_COMMUNICATION; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_006 +* @tc.desc : Test GetAudioEffectInfoArray interface. +*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_006, TestSize.Level1) +{ + // STREAM_NOTIFICATION + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_SONIFICATION; + StreamUsage streamUsage = STREAM_USAGE_UNKNOWN; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); + + audioSceneEffectInfo = {}; + contentType = CONTENT_TYPE_SONIFICATION; + streamUsage = STREAM_USAGE_MEDIA; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_007 +* @tc.desc : Test GetAudioEffectInfoArray interface. +*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_007, TestSize.Level1) +{ + // STREAM_ACCESSIBILITY + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_SPEECH; + StreamUsage streamUsage = STREAM_USAGE_ACCESSIBILITY; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} + +/** +* @tc.name : Test GetAudioEffectInfoArray API +* @tc.number: GetAudioEffectInfoArray_008 +* @tc.desc : Test GetAudioEffectInfoArray interface. 
+*/ +HWTEST(AudioManagerUnitTest, GetAudioEffectInfoArray_008, TestSize.Level1) +{ + // STREAM_ALARM + int32_t ret; + AudioSceneEffectInfo audioSceneEffectInfo; + ContentType contentType = CONTENT_TYPE_MUSIC; + StreamUsage streamUsage = STREAM_USAGE_ALARM; + ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(audioSceneEffectInfo, contentType, streamUsage); + EXPECT_EQ(SUCCESS, ret); + EXPECT_EQ(EFFECT_NONE, audioSceneEffectInfo.mode[0]); + EXPECT_EQ(EFFECT_DEFAULT, audioSceneEffectInfo.mode[1]); +} } // namespace AudioStandard } // namespace OHOS diff --git a/frameworks/native/audiorenderer/include/audio_renderer_private.h b/frameworks/native/audiorenderer/include/audio_renderer_private.h index 4dda79663c4f066e4c6aa7c1e55f51df1063f455..860b98c1985143b44e68009e76157b5e455c1bda 100644 --- a/frameworks/native/audiorenderer/include/audio_renderer_private.h +++ b/frameworks/native/audiorenderer/include/audio_renderer_private.h @@ -85,6 +85,8 @@ public: const std::shared_ptr &callback) override; int32_t UnregisterAudioPolicyServerDiedCb(const int32_t clientPid) override; void DestroyAudioRendererStateCallback() override; + AudioEffectMode GetAudioEffectMode() const override; + int32_t SetAudioEffectMode(AudioEffectMode effectMode) const override; AudioRendererInfo rendererInfo_ = {}; diff --git a/frameworks/native/audiorenderer/src/audio_renderer.cpp b/frameworks/native/audiorenderer/src/audio_renderer.cpp index 422ac58b70aa6ff3c027563c5f4558af67f6df0b..1458827c1902fdd14df0fddce80157b52d28fa5c 100644 --- a/frameworks/native/audiorenderer/src/audio_renderer.cpp +++ b/frameworks/native/audiorenderer/src/audio_renderer.cpp @@ -940,5 +940,15 @@ void AudioRendererStateChangeCallbackImpl::OnRendererStateChange( cb->OnStateChange(deviceInfo); } } + +AudioEffectMode AudioRendererPrivate::GetAudioEffectMode() const +{ + return audioStream_->GetAudioEffectMode(); +} + +int32_t AudioRendererPrivate::SetAudioEffectMode(AudioEffectMode effectMode) const +{ + return audioStream_->SetAudioEffectMode(effectMode); +} } // namespace AudioStandard } // namespace OHOS diff --git a/frameworks/native/audiorenderer/test/unittest/renderer_test/src/audio_renderer_unit_test.cpp b/frameworks/native/audiorenderer/test/unittest/renderer_test/src/audio_renderer_unit_test.cpp index 317343438d2aa5d7186e6f634add9ad39d103e8c..c828db612248a23485c20db02e547a37d6cd69b0 100644 --- a/frameworks/native/audiorenderer/test/unittest/renderer_test/src/audio_renderer_unit_test.cpp +++ b/frameworks/native/audiorenderer/test/unittest/renderer_test/src/audio_renderer_unit_test.cpp @@ -4515,5 +4515,105 @@ HWTEST(AudioRendererUnitTest, Audio_Renderer_SetRendererPeriodPositionCallback_0 ret = audioRenderer->SetRendererPeriodPositionCallback(VALUE_NEGATIVE, positionCB); EXPECT_NE(SUCCESS, ret); } + +/** + * @tc.name : Test SetAudioEffectMode via legal input, EFFECT_NONE + * @tc.number: Audio_Renderer_SetAudioEffectMode_001 + * @tc.desc : Test SetAudioEffectMode interface. Returns SUCCESS, if the effect mode is successfully set. 
+ */ +HWTEST(AudioRendererUnitTest, Audio_Renderer_SetAudioEffectMode_001, TestSize.Level1) +{ + int32_t ret = -1; + AudioRendererOptions rendererOptions; + + AudioRendererUnitTest::InitializeRendererOptions(rendererOptions); + unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions); + ASSERT_NE(nullptr, audioRenderer); + + ret = audioRenderer->SetAudioEffectMode(EFFECT_NONE); + EXPECT_EQ(SUCCESS, ret); + audioRenderer->Release(); +} + +/** + * @tc.name : Test SetAudioEffectMode via legal input, EFFECT_DEFAULT + * @tc.number: Audio_Renderer_SetAudioEffectMode_002 + * @tc.desc : Test SetAudioEffectMode interface. Returns SUCCESS, if the effect mode is successfully set. + */ +HWTEST(AudioRendererUnitTest, Audio_Renderer_SetAudioEffectMode_002, TestSize.Level1) +{ + int32_t ret = -1; + AudioRendererOptions rendererOptions; + + AudioRendererUnitTest::InitializeRendererOptions(rendererOptions); + unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions); + ASSERT_NE(nullptr, audioRenderer); + + ret = audioRenderer->SetAudioEffectMode(EFFECT_DEFAULT); + EXPECT_EQ(SUCCESS, ret); + audioRenderer->Release(); +} + +/** + * @tc.name : Test GetAudioEffectMode with, EFFECT_NONE + * @tc.number: Audio_Renderer_GetAudioEffectMode_001 + * @tc.desc : Test GetAudioEffectMode interface. Returns the current effect mode. + */ +HWTEST(AudioRendererUnitTest, Audio_Renderer_GetAudioEffectMode_001, TestSize.Level1) +{ + int32_t ret = -1; + AudioRendererOptions rendererOptions; + + AudioRendererUnitTest::InitializeRendererOptions(rendererOptions); + unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions); + ASSERT_NE(nullptr, audioRenderer); + + ret = audioRenderer->SetAudioEffectMode(EFFECT_NONE); + EXPECT_EQ(SUCCESS, ret); + + AudioEffectMode effectMode = audioRenderer->GetAudioEffectMode(); + EXPECT_EQ(EFFECT_NONE, effectMode); + audioRenderer->Release(); +} + +/** + * @tc.name : Test GetAudioEffectMode with, EFFECT_DEFAULT + * @tc.number: Audio_Renderer_GetAudioEffectMode_002 + * @tc.desc : Test GetAudioEffectMode interface. Returns the current effect mode. + */ +HWTEST(AudioRendererUnitTest, Audio_Renderer_GetAudioEffectMode_002, TestSize.Level1) +{ + int32_t ret = -1; + AudioRendererOptions rendererOptions; + + AudioRendererUnitTest::InitializeRendererOptions(rendererOptions); + unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions); + ASSERT_NE(nullptr, audioRenderer); + + ret = audioRenderer->SetAudioEffectMode(EFFECT_DEFAULT); + EXPECT_EQ(SUCCESS, ret); + + AudioEffectMode effectMode = audioRenderer->GetAudioEffectMode(); + EXPECT_EQ(EFFECT_DEFAULT, effectMode); + audioRenderer->Release(); +} + +/** + * @tc.name : Test GetAudioEffectMode with, default effectMode + * @tc.number: Audio_Renderer_GetAudioEffectMode_003 + * @tc.desc : Test GetAudioEffectMode interface. Returns the default effect mode EFFECT_DEFAULT. 
+ */ +HWTEST(AudioRendererUnitTest, Audio_Renderer_GetAudioEffectMode_003, TestSize.Level1) +{ + AudioRendererOptions rendererOptions; + + AudioRendererUnitTest::InitializeRendererOptions(rendererOptions); + unique_ptr audioRenderer = AudioRenderer::Create(rendererOptions); + ASSERT_NE(nullptr, audioRenderer); + + AudioEffectMode effectMode = audioRenderer->GetAudioEffectMode(); + EXPECT_EQ(EFFECT_DEFAULT, effectMode); + audioRenderer->Release(); +} } // namespace AudioStandard } // namespace OHOS diff --git a/frameworks/native/audiostream/include/audio_stream.h b/frameworks/native/audiostream/include/audio_stream.h index 94b4a0ecbf6f44fb40b9ef8d796a681f4f87d573..159a9b2c5123bcc8b89e9228a8f4dd14cb3e111d 100644 --- a/frameworks/native/audiostream/include/audio_stream.h +++ b/frameworks/native/audiostream/include/audio_stream.h @@ -73,6 +73,8 @@ public: int32_t SetLowPowerVolume(float volume); float GetLowPowerVolume(); float GetSingleStreamVolume(); + AudioEffectMode GetAudioEffectMode(); + int32_t SetAudioEffectMode(AudioEffectMode effectMode); std::vector GetSupportedFormats() const; std::vector GetSupportedEncodingTypes() const; diff --git a/frameworks/native/audiostream/test/unittest/stream_test/src/audio_stream_unit_test.cpp b/frameworks/native/audiostream/test/unittest/stream_test/src/audio_stream_unit_test.cpp index 675d1b0a9e4d5f2fb16c13420db56662a6d746c5..bff872e20bdddbf91a7c4b13905cecec96c7a53b 100644 --- a/frameworks/native/audiostream/test/unittest/stream_test/src/audio_stream_unit_test.cpp +++ b/frameworks/native/audiostream/test/unittest/stream_test/src/audio_stream_unit_test.cpp @@ -24,6 +24,7 @@ using namespace testing::ext; namespace OHOS { namespace AudioStandard { const int32_t FAILURE = -1; +const int32_t FAILURE_PA = -8; const uint32_t DEFAULT_SAMPLING_RATE = 44100; const uint8_t DEFAULT_CHANNEL_COUNT = 2; const uint8_t DEFAULT_SAMPLE_SIZE = 2; @@ -402,5 +403,76 @@ HWTEST(AudioStreamUnitTest, Audio_Stream_SetStreamRenderRate_002, TestSize.Level ret = audioStream_->SetRendererWriteCallback(callback); EXPECT_EQ(true, ret < 0); } + +/** +* @tc.name : Test Audio_Stream_SetAudioEffectMode_001 via illegal state, input EFFECT_NONE +* @tc.number: Audio_Stream_SetAudioEffectMode_001 +* @tc.desc : Test SetAudioEffectMode interface. Returns FAILURE_PA. +*/ +HWTEST(AudioStreamUnitTest, Audio_Stream_SetAudioEffectMode_001, TestSize.Level1) +{ + std::shared_ptr audioStream_; + AudioStreamUnitTest::InitAudioStream(audioStream_); + AudioEffectMode effectMode = AudioEffectMode::EFFECT_NONE; + int32_t ret = audioStream_->SetAudioEffectMode(effectMode); + EXPECT_EQ(FAILURE_PA, ret); +} + +/** +* @tc.name : Test Audio_Stream_SetAudioEffectMode_002 via illegal state, input EFFECT_DEFAULT +* @tc.number: Audio_Stream_SetAudioEffectMode_002 +* @tc.desc : Test SetAudioEffectMode interface. Returns FAILURE_PA. +*/ +HWTEST(AudioStreamUnitTest, Audio_Stream_SetAudioEffectMode_002, TestSize.Level1) +{ + std::shared_ptr audioStream_; + AudioStreamUnitTest::InitAudioStream(audioStream_); + AudioEffectMode effectMode = AudioEffectMode::EFFECT_DEFAULT; + int32_t ret = audioStream_->SetAudioEffectMode(effectMode); + EXPECT_EQ(FAILURE_PA, ret); +} + +/** +* @tc.name : Test Audio_Stream_GetAudioEffectMode_001 with, EFFECT_NONE +* @tc.number: Audio_Stream_GetAudioEffectMode_001 +* @tc.desc : Test GetAudioEffectMode interface. Returns the default effect mode. 
+*/ +HWTEST(AudioStreamUnitTest, Audio_Stream_GetAudioEffectMode_001, TestSize.Level1) +{ + std::shared_ptr audioStream_; + AudioStreamUnitTest::InitAudioStream(audioStream_); + int32_t ret = audioStream_->SetAudioEffectMode(EFFECT_NONE); + EXPECT_EQ(FAILURE_PA, ret); + AudioEffectMode effectMode = audioStream_->GetAudioEffectMode(); + EXPECT_EQ(effectMode, AudioEffectMode::EFFECT_DEFAULT); +} + +/** +* @tc.name : Test Audio_Stream_GetAudioEffectMode_002 with, EFFECT_DEFAULT +* @tc.number: Audio_Stream_GetAudioEffectMode_002 +* @tc.desc : Test GetAudioEffectMode interface. Returns the default effect mode. +*/ +HWTEST(AudioStreamUnitTest, Audio_Stream_GetAudioEffectMode_002, TestSize.Level1) +{ + std::shared_ptr audioStream_; + AudioStreamUnitTest::InitAudioStream(audioStream_); + int32_t ret = audioStream_->SetAudioEffectMode(EFFECT_DEFAULT); + EXPECT_EQ(FAILURE_PA, ret); + AudioEffectMode effectMode = audioStream_->GetAudioEffectMode(); + EXPECT_EQ(effectMode, AudioEffectMode::EFFECT_DEFAULT); +} + +/** +* @tc.name : Test Audio_Stream_GetAudioEffectMode_003 with, default effectMode +* @tc.number: Audio_Stream_GetAudioEffectMode_003 +* @tc.desc : Test GetAudioEffectMode interface. Returns the default effect mode EFFECT_DEFAULT. +*/ +HWTEST(AudioStreamUnitTest, Audio_Stream_GetAudioEffectMode_003, TestSize.Level1) +{ + std::shared_ptr audioStream_; + AudioStreamUnitTest::InitAudioStream(audioStream_); + AudioEffectMode effectMode = audioStream_->GetAudioEffectMode(); + EXPECT_EQ(effectMode, AudioEffectMode::EFFECT_DEFAULT); +} } // namespace AudioStandard } // namespace OHOS \ No newline at end of file diff --git a/frameworks/native/pulseaudio/modules/BUILD.gn b/frameworks/native/pulseaudio/modules/BUILD.gn index 5fbb3dced6fd31c947567165c0f0720e2ea4b3f7..8a058374ce0e1f382217e84ed948a1b2f9589eae 100644 --- a/frameworks/native/pulseaudio/modules/BUILD.gn +++ b/frameworks/native/pulseaudio/modules/BUILD.gn @@ -17,5 +17,7 @@ group("pa_extend_modules") { deps = [ "hdi:module-hdi-sink", "hdi:module-hdi-source", + "effect:module-effect-sink", + "mixer:module-mixer-sink", ] } diff --git a/frameworks/native/pulseaudio/modules/effect/BUILD.gn b/frameworks/native/pulseaudio/modules/effect/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..fc7c414209935af533b6f767366f45dd14db67e5 --- /dev/null +++ b/frameworks/native/pulseaudio/modules/effect/BUILD.gn @@ -0,0 +1,72 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
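The renderer and stream unit tests above exercise SetAudioEffectMode/GetAudioEffectMode end to end. For orientation, a minimal caller-side sketch of the new renderer API follows; it is not part of the patch, ApplyDefaultEffect is a made-up helper, and the AudioRendererOptions values are illustrative defaults along the lines of what InitializeRendererOptions presumably sets in the tests.

#include <memory>
#include "audio_errors.h"
#include "audio_renderer.h"

using namespace OHOS::AudioStandard;

void ApplyDefaultEffect()
{
    AudioRendererOptions options;                          // illustrative values
    options.streamInfo.samplingRate = SAMPLE_RATE_44100;
    options.streamInfo.encoding = ENCODING_PCM;
    options.streamInfo.format = SAMPLE_S16LE;
    options.streamInfo.channels = STEREO;
    options.rendererInfo.contentType = CONTENT_TYPE_MUSIC;
    options.rendererInfo.streamUsage = STREAM_USAGE_MEDIA;
    options.rendererInfo.rendererFlags = 0;

    std::unique_ptr<AudioRenderer> renderer = AudioRenderer::Create(options);
    if (renderer == nullptr) {
        return;
    }
    // EFFECT_DEFAULT keeps the stream on the effect chain; EFFECT_NONE bypasses it.
    if (renderer->SetAudioEffectMode(EFFECT_DEFAULT) == SUCCESS) {
        AudioEffectMode mode = renderer->GetAudioEffectMode();  // expected: EFFECT_DEFAULT
        (void)mode;
    }
    renderer->Release();
}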
+ +import("//build/ohos.gni") + +pulseaudio_dir = "//third_party/pulseaudio" +pulseaudio_build_path = "//third_party/pulseaudio/ohosbuild" + +config("effect_config") { + visibility = [ ":*" ] + + include_dirs = [ + "$pulseaudio_dir/include", + "$pulseaudio_dir/src", + "$pulseaudio_dir", + "$pulseaudio_build_path/src", + "$pulseaudio_build_path/include", + "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiocommon/include", + "//foundation/multimedia/audio_framework/frameworks/native/audioeffect/include", + "//commonlibrary/c_utils/base/include", + ] + + cflags = [ + "-Wall", + "-Werror", + "-DHAVE_CONFIG_H", + "-D_GNU_SOURCE", + "-D__INCLUDED_FROM_PULSE_AUDIO", + ] +} + +ohos_shared_library("module-effect-sink") { + sanitize = { + cfi = true + debug = false + blocklist = "//foundation/multimedia/audio_framework/cfi_blocklist.txt" + } + sources = [ + "module_effect_sink.c", + ] + + configs = [ ":effect_config" ] + + cflags = [ "-DPA_MODULE_NAME=libmodule_effect_sink_z_so" ] + + ldflags = [ + "-Wl", + "--no-undefined", + ] + + deps = [ + "$pulseaudio_build_path/src:pulsecommon", + "$pulseaudio_build_path/src/pulse:pulse", + "$pulseaudio_build_path/src/pulsecore:pulsecore", + "//foundation/multimedia/audio_framework/frameworks/native/audioeffect:audio_effect", + ] + + external_deps = [ "hiviewdfx_hilog_native:libhilog" ] + + subsystem_name = "multimedia" + part_name = "audio_framework" +} \ No newline at end of file diff --git a/frameworks/native/pulseaudio/modules/effect/module_effect_sink.c b/frameworks/native/pulseaudio/modules/effect/module_effect_sink.c new file mode 100644 index 0000000000000000000000000000000000000000..01e92688e2dbcba6758c60149b25c058c2139325 --- /dev/null +++ b/frameworks/native/pulseaudio/modules/effect/module_effect_sink.c @@ -0,0 +1,639 @@ +/*** + This file is part of PulseAudio. + + Copyright 2004-2009 Lennart Poettering + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see . 
+***/ + +#ifdef HAVE_CONFIG_H +#include +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "audio_effect_chain_adapter.h" +#include "audio_log.h" + +PA_MODULE_AUTHOR("Lennart Poettering"); +PA_MODULE_DESCRIPTION("Virtual channel effecting sink"); +PA_MODULE_VERSION(PACKAGE_VERSION); +PA_MODULE_LOAD_ONCE(false); +PA_MODULE_USAGE( + "sink_name= " + "sink_properties= " + "master= " + "master_channel_map= " + "format= " + "rate= " + "channels= " + "channel_map= " + "resample_method= " + "remix="); + +struct userdata { + pa_module *module; + + pa_sink *sink; + pa_sink_input *sink_input; + struct EffectChainAdapter *effectChainAdapter; + float *bufIn; // input buffer, output of the effect sink + float *bufOut; // output buffer for the final processed output + size_t currIdx; + size_t processSize; + + pa_memblockq *bufInQ; + int32_t frameLen; + + bool auto_desc; +}; + +static const char* const valid_modargs[] = { + "sink_name", + "sink_properties", + "master", + "master_channel_map", + "format", + "rate", + "channels", + "channel_map", + "resample_method", + "remix", + NULL +}; + +/* Called from I/O thread context */ +static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) { + struct userdata *u = PA_SINK(o)->userdata; + + switch (code) { + + case PA_SINK_MESSAGE_GET_LATENCY: + + /* The sink is _put() before the sink input is, so let's + * make sure we don't access it yet */ + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) { + *((int64_t*) data) = 0; + return 0; + } + + *((int64_t*) data) = + /* Get the latency of the master sink */ + pa_sink_get_latency_within_thread(u->sink_input->sink, true) + + + /* Add the latency internal to our sink input on top */ + pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec); + + return 0; + } + + return pa_sink_process_msg(o, code, data, offset, chunk); +} + +/* Called from main context */ +static int sink_set_state_in_main_thread(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->state)) + return 0; + + pa_sink_input_cork(u->sink_input, state == PA_SINK_SUSPENDED); + return 0; +} + +/* Called from the IO thread. 
*/ +static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) { + struct userdata *u; + + pa_assert(s); + pa_assert_se(u = s->userdata); + + /* When set to running or idle for the first time, request a rewind + * of the master sink to make sure we are heard immediately */ + if (PA_SINK_IS_OPENED(new_state) && s->thread_info.state == PA_SINK_INIT) { + pa_log_debug("Requesting rewind due to state change."); + pa_sink_input_request_rewind(u->sink_input, 0, false, true, true); + } + + return 0; +} + +/* Called from I/O thread context */ +static void sink_request_rewind(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) + return; + + pa_sink_input_request_rewind(u->sink_input, s->thread_info.rewind_nbytes, true, false, false); +} + +/* Called from I/O thread context */ +static void sink_update_requested_latency(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) + return; + + /* Just hand this one over to the master sink */ + pa_sink_input_set_requested_latency_within_thread( + u->sink_input, + pa_sink_get_requested_latency_within_thread(s)); +} + +#define MAX_16BIT 32768 +// // BEGIN QUEUE +// static size_t memblockq_missing(pa_memblockq *bq) { +// size_t l, tlength; +// pa_assert(bq); + +// tlength = pa_memblockq_get_tlength(bq); +// if ((l = pa_memblockq_get_length(bq)) >= tlength) +// return 0; + +// l = tlength - l; +// return l >= pa_memblockq_get_minreq(bq) ? l : 0; +// } + +// /* Called from I/O thread context */ +// static int sink_input_pop_cb(pa_sink_input *si, size_t nbytes, pa_memchunk *chunk) { +// struct userdata *u; +// size_t bytes_missing; +// pa_memchunk tchunk; + +// pa_sink_input_assert_ref(si); +// pa_assert(chunk); +// pa_assert_se(u = si->userdata); + +// if (!PA_SINK_IS_LINKED(u->sink->thread_info.state)) +// return -1; + +// /* Hmm, process any rewind request that might be queued up */ +// pa_sink_process_rewind(u->sink, 0); + +// while ((bytes_missing = memblockq_missing(u->bufInQ)) != 0) { +// pa_memchunk nchunk; + +// pa_sink_render(u->sink, bytes_missing, &nchunk); +// pa_memblockq_push(u->bufInQ, &nchunk); +// pa_memblock_unref(nchunk.memblock); +// } + +// // pa_memblockq_rewind(u->bufInQ, sink_bytes(u, u->fftlen - BLOCK_SIZE)); +// pa_memblockq_peek_fixed_size(u->bufInQ, u->frameLen * 2 * 2, &tchunk); +// pa_memblockq_drop(u->bufInQ, tchunk.length); + +// chunk->index = 0; +// chunk->length = u->frameLen * 2 * 2; +// chunk->memblock = pa_memblock_new(si->sink->core->mempool, chunk->length); + +// float *bufIn = (float *)u->effectChainAdapter->bufIn; +// float *bufOut = (float *)u->effectChainAdapter->bufOut; +// short *src = pa_memblock_acquire_chunk(&tchunk); +// short *dst = pa_memblock_acquire_chunk(chunk); +// int i, tmp; +// for (i = 0; i < u->frameLen * 2; i++) { +// bufIn[i] = (float)(src[i]) / MAX_16BIT; +// } +// EffectChainManagerProcess(u->effectChainAdapter, si->sink->name); +// for (i = 0; i < u->frameLen * 2; i++) { +// tmp = (int)(bufOut[i] * MAX_16BIT); +// if (tmp >= MAX_16BIT) { +// dst[i] = MAX_16BIT - 1; +// } else if (tmp <= -MAX_16BIT) { +// dst[i] = -MAX_16BIT; +// } else { +// dst[i] = (short)tmp; +// } +// } + +// pa_memblock_release(tchunk.memblock); +// 
pa_memblock_unref(tchunk.memblock);
+
+// return 0;
+// }
+// // END QUEUE
+
+/* Called from I/O thread context */
+static int sink_input_pop_cb(pa_sink_input *si, size_t nbytes, pa_memchunk *chunk) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(si);
+    pa_assert(chunk);
+    pa_assert_se(u = si->userdata);
+
+    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
+        return -1;
+
+    /* Hmm, process any rewind request that might be queued up */
+    pa_sink_process_rewind(u->sink, 0);
+
+    pa_sink_render(u->sink, nbytes, chunk);
+
+    const char *sceneMode = pa_proplist_gets(si->proplist, "scene.mode");
+    // AUDIO_INFO_LOG("effect_sink: sink-input %{public}s pop in sink %{public}s", sceneMode, si->origin_sink->name);
+    EffectChainManagerProcess(u->effectChainAdapter, si->origin_sink->name);
+
+    short *src = pa_memblock_acquire_chunk(chunk);
+    int i = 0;
+
+    AUDIO_INFO_LOG("effect_sink: sink-input %{public}s pop in sink %{public}s", sceneMode, si->origin_sink->name);
+    /* chunk->length is in bytes; iterate over 16-bit samples so the debug gain stays inside the block */
+    for (i = 0; i < (int)(chunk->length / sizeof(short)); i++) {
+        src[i] *= 3;
+    }
+
+    return 0;
+}
+
+/* Called from I/O thread context */
+static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
+    size_t amount = 0;
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    /* If the sink is not yet linked, there is nothing to rewind */
+    if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
+        return;
+
+    if (u->sink->thread_info.rewind_nbytes > 0) {
+        amount = PA_MIN(u->sink->thread_info.rewind_nbytes, nbytes);
+        u->sink->thread_info.rewind_nbytes = 0;
+    }
+
+    pa_sink_process_rewind(u->sink, amount);
+}
+
+/* Called from I/O thread context */
+static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    /* FIXME: Too small max_rewind:
+     * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
+    pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
+}
+
+/* Called from I/O thread context */
+static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    pa_sink_set_max_request_within_thread(u->sink, nbytes);
+}
+
+/* Called from I/O thread context */
+static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
+}
+
+/* Called from I/O thread context */
+static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
+}
+
+/* Called from I/O thread context */
+static void sink_input_detach_cb(pa_sink_input *i) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    if (PA_SINK_IS_LINKED(u->sink->thread_info.state))
+        pa_sink_detach_within_thread(u->sink);
+
+    pa_sink_set_rtpoll(u->sink, NULL);
+}
+
+/* Called from I/O thread context */
+static void sink_input_attach_cb(pa_sink_input *i) {
+    struct userdata *u;
+
+    pa_sink_input_assert_ref(i);
+    pa_assert_se(u = i->userdata);
+
+    pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
+    pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency,
i->sink->thread_info.max_latency); + pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency); + pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i)); + + /* FIXME: Too small max_rewind: + * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */ + pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i)); + + if (PA_SINK_IS_LINKED(u->sink->thread_info.state)) + pa_sink_attach_within_thread(u->sink); +} + +/* Called from main context */ +static void sink_input_kill_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + /* The order here matters! We first kill the sink so that streams + * can properly be moved away while the sink input is still connected + * to the master. */ + pa_sink_input_cork(u->sink_input, true); + pa_sink_unlink(u->sink); + pa_sink_input_unlink(u->sink_input); + + pa_sink_input_unref(u->sink_input); + u->sink_input = NULL; + + pa_sink_unref(u->sink); + u->sink = NULL; + + pa_module_unload_request(u->module, true); +} + +/* Called from main context */ +static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + if (dest) { + pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq); + pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags); + } else + pa_sink_set_asyncmsgq(u->sink, NULL); + + if (u->auto_desc && dest) { + const char *k; + pa_proplist *pl; + + pl = pa_proplist_new(); + k = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "effected %s", k ? k : dest->name); + + pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl); + pa_proplist_free(pl); + } +} + +int pa__init(pa_module *m) { + struct userdata *u; + pa_sample_spec ss; + pa_resample_method_t resample_method = PA_RESAMPLER_INVALID; + pa_channel_map sink_map, stream_map; + pa_modargs *ma; + pa_sink *master; + pa_sink_input_new_data sink_input_data; + pa_sink_new_data sink_data; + bool remix = true; + + pa_assert(m); + + if (!(ma = pa_modargs_new(m->argument, valid_modargs))) { + pa_log("Failed to parse module arguments."); + goto fail; + } + + if (!(master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "master", NULL), PA_NAMEREG_SINK))) { + pa_log("Master sink not found"); + goto fail; + } + // master = m->core->default_sink; + + ss = master->sample_spec; + sink_map = master->channel_map; + if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &sink_map, PA_CHANNEL_MAP_DEFAULT) < 0) { + pa_log("Invalid sample format specification or channel map"); + goto fail; + } + + stream_map = sink_map; + if (pa_modargs_get_channel_map(ma, "master_channel_map", &stream_map) < 0) { + pa_log("Invalid master channel map"); + goto fail; + } + + if (stream_map.channels != ss.channels) { + pa_log("Number of channels doesn't match"); + goto fail; + } + + if (pa_channel_map_equal(&stream_map, &master->channel_map)) + pa_log_warn("No effecting configured, proceeding nonetheless!"); + + if (pa_modargs_get_value_boolean(ma, "remix", &remix) < 0) { + pa_log("Invalid boolean remix parameter"); + goto fail; + } + + if (pa_modargs_get_resample_method(ma, &resample_method) < 0) { + pa_log("Invalid resampling method"); + goto fail; + } + + u = pa_xnew0(struct userdata, 1); + u->module = m; + m->userdata = u; + + /* Create sink */ + pa_sink_new_data_init(&sink_data); + sink_data.driver = __FILE__; + sink_data.module 
= m; + if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL)))) + sink_data.name = pa_sprintf_malloc("%s.effected", master->name); + pa_sink_new_data_set_sample_spec(&sink_data, &ss); + pa_sink_new_data_set_channel_map(&sink_data, &sink_map); + pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, master->name); + pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter"); + + if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) { + pa_log("Invalid properties"); + pa_sink_new_data_done(&sink_data); + goto fail; + } + + if ((u->auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) { + const char *k; + + k = pa_proplist_gets(master->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "effected %s", k ? k : master->name); + } + + u->sink = pa_sink_new(m->core, &sink_data, master->flags & (PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY)); + pa_sink_new_data_done(&sink_data); + + if (!u->sink) { + pa_log("Failed to create sink."); + goto fail; + } + + u->sink->parent.process_msg = sink_process_msg; + u->sink->set_state_in_main_thread = sink_set_state_in_main_thread; + u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb; + u->sink->update_requested_latency = sink_update_requested_latency; + u->sink->request_rewind = sink_request_rewind; + u->sink->userdata = u; + + pa_sink_set_asyncmsgq(u->sink, master->asyncmsgq); + + /* Create sink input */ + pa_sink_input_new_data_init(&sink_input_data); + sink_input_data.driver = __FILE__; + sink_input_data.module = m; + pa_sink_input_new_data_set_sink(&sink_input_data, master, false, true); + sink_input_data.origin_sink = u->sink; + pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "effected Stream"); + pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter"); + pa_proplist_sets(sink_input_data.proplist, "scene.mode", "effect"); + pa_sink_input_new_data_set_sample_spec(&sink_input_data, &ss); + pa_sink_input_new_data_set_channel_map(&sink_input_data, &stream_map); + sink_input_data.flags = (remix ? 
0 : PA_SINK_INPUT_NO_REMIX) | PA_SINK_INPUT_START_CORKED; + sink_input_data.resample_method = resample_method; + + pa_sink_input_new(&u->sink_input, m->core, &sink_input_data); + pa_sink_input_new_data_done(&sink_input_data); + + if (!u->sink_input) + goto fail; + + u->sink_input->pop = sink_input_pop_cb; + u->sink_input->process_rewind = sink_input_process_rewind_cb; + u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb; + u->sink_input->update_max_request = sink_input_update_max_request_cb; + u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb; + u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb; + u->sink_input->attach = sink_input_attach_cb; + u->sink_input->detach = sink_input_detach_cb; + u->sink_input->kill = sink_input_kill_cb; + u->sink_input->moving = sink_input_moving_cb; + u->sink_input->userdata = u; + + u->sink->input_to_master = u->sink_input; + + // LoadEffectChainAdapter + u->effectChainAdapter = pa_xnew0(struct EffectChainAdapter, 1); + int32_t ret = LoadEffectChainAdapter(u->effectChainAdapter); + if (ret < 0) { + AUDIO_INFO_LOG("Load adapter failed"); + goto fail; + } + + // Test adapter function + // int idx = EffectChainManagerReturnValue(u->effectChainAdapter, 2); + // AUDIO_INFO_LOG("xyq: effect_sink EffectChainReturnValue, value=%{public}d", idx); + + int32_t frameLen = EffectChainManagerGetFrameLen(u->effectChainAdapter); + u->frameLen = frameLen; + AUDIO_INFO_LOG("xyq: effect_sink (%{public}s) FrameLen, value=%{public}d", u->sink->name, frameLen); + AUDIO_INFO_LOG("xyq: effect_sink (%{public}s) bufferSize, value=%{public}d", u->sink->name, ss.channels * frameLen * sizeof(float)); + u->currIdx = 0; + u->processSize = ss.channels * frameLen * sizeof(float); + pa_assert_se(u->effectChainAdapter->bufIn = (float *)malloc(u->processSize)); + pa_assert_se(u->effectChainAdapter->bufOut = (float *)malloc(u->processSize)); + + + /* The order here is important. The input must be put first, + * otherwise streams might attach to the sink before the sink + * input is attached to the master. */ + AUDIO_INFO_LOG("xyq: effect_sink (%{public}s) sink input put before", u->sink->name); + pa_sink_input_put(u->sink_input); + AUDIO_INFO_LOG("xyq: effect_sink (%{public}s) sink input put done", u->sink->name); + pa_sink_put(u->sink); + pa_sink_input_cork(u->sink_input, false); + + + pa_modargs_free(ma); + AUDIO_INFO_LOG("xyq: effect_sink (%{public}s) create done", u->sink->name); + return 0; + +fail: + AUDIO_INFO_LOG("xyq: effect_sink create fail"); + if (ma) + pa_modargs_free(ma); + + pa__done(m); + + return -1; +} + +int pa__get_n_used(pa_module *m) { + struct userdata *u; + + pa_assert(m); + pa_assert_se(u = m->userdata); + + return pa_sink_linked_by(u->sink); +} + +void pa__done(pa_module*m) { + struct userdata *u; + + pa_assert(m); + + if (!(u = m->userdata)) + return; + + /* See comments in sink_input_kill_cb() above regarding + * destruction order! 
*/ + + if (u->sink_input) + pa_sink_input_cork(u->sink_input, true); + + if (u->sink) + pa_sink_unlink(u->sink); + + if (u->sink_input) { + pa_sink_input_unlink(u->sink_input); + pa_sink_input_unref(u->sink_input); + } + + if (u->sink) + pa_sink_unref(u->sink); + + pa_xfree(u); +} diff --git a/frameworks/native/pulseaudio/modules/mixer/BUILD.gn b/frameworks/native/pulseaudio/modules/mixer/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..cab1874f094be8cb0899b525b159cb68f502daf6 --- /dev/null +++ b/frameworks/native/pulseaudio/modules/mixer/BUILD.gn @@ -0,0 +1,78 @@ +# Copyright (c) 2021-2022 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") + +pulseaudio_dir = "//third_party/pulseaudio" +pulseaudio_build_path = "//third_party/pulseaudio/ohosbuild" + +config("mixer_config") { + visibility = [ ":*" ] + + include_dirs = [ + "$pulseaudio_dir/include", + "$pulseaudio_dir/src", + "$pulseaudio_dir", + "$pulseaudio_build_path/src", + "$pulseaudio_build_path/include", + "//drivers/peripheral/audio/interfaces/include", + "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiocommon/include", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/sink/common", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/sink/primary", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/sink/file", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/sink/bluetooth", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/sink/remote", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source/common", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source/primary", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source/file", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source/remote", + "//commonlibrary/c_utils/base/include", + ] + + cflags = [ + "-Wall", + "-Werror", + "-DHAVE_CONFIG_H", + "-D_GNU_SOURCE", + "-D__INCLUDED_FROM_PULSE_AUDIO", + ] +} + +# mixer +ohos_shared_library("module-mixer-sink") { + sanitize = { + cfi = true + debug = false + blocklist = "//foundation/multimedia/audio_framework/cfi_blocklist.txt" + } + sources = [ + "module_mixer_sink.c", + ] + + configs = [ ":mixer_config" ] + + cflags = [ "-DPA_MODULE_NAME=libmodule_mixer_sink_z_so" ] + + deps = [ + "$pulseaudio_build_path/src:pulsecommon", + "$pulseaudio_build_path/src/pulse:pulse", + "$pulseaudio_build_path/src/pulsecore:pulsecore", + "//foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source:capturer_source_adapter", + "//third_party/bounds_checking_function:libsec_shared", + ] + + external_deps = [ "hiviewdfx_hilog_native:libhilog" ] + + subsystem_name = "multimedia" + part_name = "audio_framework" +} diff --git a/frameworks/native/pulseaudio/modules/mixer/module_mixer_sink.c b/frameworks/native/pulseaudio/modules/mixer/module_mixer_sink.c 
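module_effect_sink.c above reads the "scene.mode" property from each sink-input it renders, and module_mixer_sink.c below re-routes sink-inputs to a matching effect sink (or back to the default sink for "EFFECT_NONE") when their proplist changes. As a rough client-side illustration only, not part of the patch, a stream could carry these properties as below; CreateTaggedStream is a made-up helper, context/stream setup is elided, and the "SCENE_MUSIC" value is illustrative and would have to match the name of an actual effect sink for the re-route to take effect.

#include <pulse/proplist.h>
#include <pulse/stream.h>

/* Sketch: tag a stream with the scene properties the effect/mixer modules key on. */
static pa_stream *CreateTaggedStream(pa_context *ctx, const pa_sample_spec *ss)
{
    pa_proplist *pl = pa_proplist_new();
    pa_proplist_sets(pl, "scene.mode", "EFFECT_DEFAULT"); /* "EFFECT_NONE" keeps the stream on the default sink */
    pa_proplist_sets(pl, "scene.type", "SCENE_MUSIC");    /* matched against effect sink names by the mixer hook */
    pa_stream *stream = pa_stream_new_with_proplist(ctx, "music-stream", ss, NULL, pl);
    pa_proplist_free(pl);
    return stream;
}

Because the mixer module connects to the PROPLIST_CHANGED hook rather than SINK_INPUT_PUT, a stream created with these properties would typically still need a later proplist update (for example via pa_stream_proplist_update()) before it gets moved to the effect sink.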
new file mode 100644 index 0000000000000000000000000000000000000000..64fc3c60a91dea740ef2648f4e5eef53729134c9 --- /dev/null +++ b/frameworks/native/pulseaudio/modules/mixer/module_mixer_sink.c @@ -0,0 +1,637 @@ +/*** + This file is part of PulseAudio. + + Copyright 2004-2008 Lennart Poettering + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see . +***/ + +#ifdef HAVE_CONFIG_H +#include +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "audio_log.h" + +PA_MODULE_AUTHOR("Lennart Poettering"); +PA_MODULE_DESCRIPTION(_("Clocked NULL sink")); +PA_MODULE_VERSION(PACKAGE_VERSION); +PA_MODULE_LOAD_ONCE(false); +PA_MODULE_USAGE( + "sink_name= " + "sink_properties= " + "format= " + "rate= " + "channels= " + "channel_map=" + "formats=" + "norewinds="); + +#define DEFAULT_SINK_NAME "null" +#define BLOCK_USEC (2 * PA_USEC_PER_SEC) +#define BLOCK_USEC_NOREWINDS (50 * PA_USEC_PER_MSEC) + +struct userdata { + pa_core *core; + pa_module *module; + pa_sink *sink; + + pa_thread *thread; + pa_thread_mq thread_mq; + pa_rtpoll *rtpoll; + + pa_usec_t block_usec; + pa_usec_t timestamp; + + pa_idxset *formats; + + bool norewinds; +}; + +static const char* const valid_modargs[] = { + "sink_name", + "sink_properties", + "format", + "rate", + "channels", + "channel_map", + "formats", + "norewinds", + NULL +}; + +// mixer +// static pa_hook_result_t sink_input_put_cb(pa_core *c, pa_sink_input *si, struct userdata *u) { +// uint32_t idx; +// AUDIO_INFO_LOG("blank_sink: sink_put_hook_callback"); +// pa_sink *s; +// pa_assert(c); +// pa_assert(u); + +// // get proplist attribute +// // const char *streamType = pa_proplist_gets(si->proplist, "stream.type"); +// const char *sceneMode = pa_proplist_gets(si->proplist, "scene.mode"); +// const char *sceneType = pa_proplist_gets(si->proplist, "scene.type"); + +// if (sceneType) { + + + +// AUDIO_INFO_LOG("blank_sink: before move sinkinputs %{public}s, %{public}s, name %{public}s, index %{public}d", +// sceneType, sceneMode, si->sink->name, si->sink->index); +// // check is effectsink +// const char *effectSinkInputList = "effect"; +// if (pa_str_in_list_spaces(effectSinkInputList, sceneMode)){ +// AUDIO_INFO_LOG("blank_sink: in effect sink list sceneMode %{public}s ", sceneMode); +// return PA_HOOK_OK; +// } + +// // check EFFECT_NONE or EFFECT_DEFAULT +// if(pa_safe_streq(sceneMode, "EFFECT_NONE")){ +// pa_sink_input_move_to(si, c->default_sink, false); +// AUDIO_INFO_LOG("blank_sink: sceneMode = EFFECT_NONE move sinkinputs %{public}s to sink %{public}s",sceneType,c->default_sink->name); +// return PA_HOOK_OK; +// } +// // classfy sinkinput to effect sink +// int flag = 0; +// PA_IDXSET_FOREACH(s, c->sinks, idx) { +// AUDIO_INFO_LOG("blank_sink: check sinkinput sceneType %{public}s and sink %{public}s", sceneType, s->name); +// if(pa_safe_streq(s->name, 
sceneType)) { +// pa_sink_input_move_to(si, s, false); +// AUDIO_INFO_LOG("blank_sink: flag = 1 move sinkinputs %{public}s to sink %{public}s", sceneType, s->name); +// flag = 1; +// break; +// } +// } +// // classfy sinkinput to default sink +// if (flag == 0) { +// pa_sink_input_move_to(si, c->default_sink, false); +// AUDIO_INFO_LOG("blank_sink: flag = 0 move sinkinputs %{public}s to sink %{public}s", sceneType, c->default_sink->name); +// } +// AUDIO_INFO_LOG("blank_sink: after move sinkinputs %{public}s, name %{public}s, index %{public}d", sceneType, si->sink->name, si->sink->index); + +// } +// return PA_HOOK_OK; +// } + +static pa_hook_result_t sink_input_put_cb_proplist_changed(pa_core *c, pa_sink_input *si, struct userdata *u) { + // uint32_t idx; + // AUDIO_INFO_LOG("blank_sink: sink_input_put_cb_proplist_changed"); + // pa_sink *s; + // pa_assert(c); + // pa_assert(u); + // // get proplist attribute + // // const char *streamType = pa_proplist_gets(si->proplist, "stream.type"); + // const char *sceneMode = pa_proplist_gets(si->proplist, "scene.mode"); + // const char *sceneType = pa_proplist_gets(si->proplist, "scene.type"); + + // AUDIO_INFO_LOG("blank_sink_proplist: before move sinkinputs %{public}s, %{public}s, name %{public}s, index %{public}d", + // sceneType, sceneMode, si->sink->name, si->sink->index); + // // check EFFECT_NONE or EFFECT_DEFAULT + // if(pa_safe_streq(sceneMode, "EFFECT_NONE")){ + // pa_sink_input_move_to(si, c->default_sink, false); + // AUDIO_INFO_LOG("blank_sink_proplist: sceneMode = EFFECT_NONE move sinkinputs %{public}s to sink %{public}s",sceneType,c->default_sink->name); + // return PA_HOOK_OK; + // } + // // classfy sinkinput to effect sink + // int flag = 0; + // PA_IDXSET_FOREACH(s, c->sinks, idx) { + // AUDIO_INFO_LOG("blank_sink_proplist: check sinkinput sceneType %{public}s and sink %{public}s", sceneType, s->name); + // if(pa_safe_streq(s->name, sceneType)) { + // pa_sink_input_move_to(si, s, false); + // AUDIO_INFO_LOG("blank_sink_proplist: flag = 1 move sinkinputs %{public}s to sink %{public}s", sceneType, s->name); + // flag = 1; + // break; + // } + // } + // // classfy sinkinput to default sink + // if (flag == 0) { + // pa_sink_input_move_to(si, c->default_sink, false); + // AUDIO_INFO_LOG("blank_sink_proplist: flag = 0 move sinkinputs %{public}s to sink %{public}s", sceneType, c->default_sink->name); + // } + // AUDIO_INFO_LOG("blank_sink_proplist: after move sinkinputs %{public}s, name %{public}s, index %{public}d", sceneType, si->sink->name, si->sink->index); + + // return PA_HOOK_OK; + + + // uint32_t idx; + AUDIO_INFO_LOG("blank_sink: sink_input_put_cb_proplist_changed"); + // pa_sink *s; + pa_sink *effect_sink; + pa_assert(c); + pa_assert(u); + // get proplist attribute + // const char *streamType = pa_proplist_gets(si->proplist, "stream.type"); + const char *sceneMode = pa_proplist_gets(si->proplist, "scene.mode"); + const char *sceneType = pa_proplist_gets(si->proplist, "scene.type"); + + AUDIO_INFO_LOG("blank_sink_proplist: before move sinkinputs %{public}s, %{public}s, name %{public}s, index %{public}d", + sceneType, sceneMode, si->sink->name, si->sink->index); + // check default/none + if(pa_safe_streq(sceneMode, "EFFECT_NONE")){ + pa_sink_input_move_to(si, c->default_sink, false); + AUDIO_INFO_LOG("blank_sink_proplist: sceneMode = EFFECT_NONE move sinkinputs %{public}s to sink %{public}s", sceneType, c->default_sink->name); + return PA_HOOK_OK; + } + + // classfy sinkinput to effect sink + // int flag = 0; + // pa_sink 
*effect_sink; + effect_sink = pa_namereg_get(c, sceneType, PA_NAMEREG_SINK); + if (!effect_sink) { + AUDIO_INFO_LOG("blank_sink: effect_sink sink not found."); + // classfy sinkinput to default sink + pa_sink_input_move_to(si, c->default_sink, false); + AUDIO_INFO_LOG("blank_sink: move sinkinputs %{public}s to sink %{public}s", sceneType, c->default_sink->name); + } else{ + // classfy sinkinput to effect sink + pa_sink_input_move_to(si, effect_sink, false); + AUDIO_INFO_LOG("blank_sink: move sinkinputs %{public}s to sink %{public}s", sceneType, effect_sink->name); + } + // PA_IDXSET_FOREACH(s, c->sinks, idx) { + // AUDIO_INFO_LOG("blank_sink: check sinkinput sceneType %{public}s and sink %{public}s", sceneType, s->name); + // if(pa_streq(s->name, sceneType)) { + // pa_sink_input_move_to(si, s, false); + // AUDIO_INFO_LOG("blank_sink: flag = 1 move sinkinputs %{public}s to sink %{public}s", sceneType, s->name); + // flag = 1; + // break; + // } + // } + + // classfy sinkinput to default sink + // if (flag == 0) { + // pa_sink_input_move_to(si, c->default_sink, false); + // AUDIO_INFO_LOG("blank_sink: flag = 0 move sinkinputs %{public}s to sink %{public}s", sceneType, c->default_sink->name); + // } + AUDIO_INFO_LOG("blank_sink_proplist: after move sinkinputs %{public}s, name %{public}s, index %{public}d", sceneType, si->sink->name, si->sink->index); + + return PA_HOOK_OK; +} + +static int sink_process_msg( + pa_msgobject *o, + int code, + void *data, + int64_t offset, + pa_memchunk *chunk) { + + struct userdata *u = PA_SINK(o)->userdata; + + switch (code) { + case PA_SINK_MESSAGE_GET_LATENCY: { + pa_usec_t now; + + now = pa_rtclock_now(); + *((int64_t*) data) = (int64_t)u->timestamp - (int64_t)now; + + return 0; + } + } + + return pa_sink_process_msg(o, code, data, offset, chunk); +} + +/* Called from the IO thread. */ +static void sink_recalculate_max_request_and_rewind(pa_sink *s) { + struct userdata *u; + size_t nbytes; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + nbytes = pa_usec_to_bytes(u->block_usec, &s->sample_spec); + + if (u->norewinds) { + pa_sink_set_max_rewind_within_thread(s, 0); + } else { + pa_sink_set_max_rewind_within_thread(s, nbytes); + } + + pa_sink_set_max_request_within_thread(s, nbytes); +} + +/* Called from the IO thread. */ +static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) { + struct userdata *u; + + pa_assert(s); + pa_assert_se(u = s->userdata); + + if (s->thread_info.state == PA_SINK_SUSPENDED || s->thread_info.state == PA_SINK_INIT) { + if (PA_SINK_IS_OPENED(new_state)) { + u->timestamp = pa_rtclock_now(); + + /* If sink was suspended to change sample formats, both + * thread_info.max_request and thread_info.max_rewind + * must be updated before first block is rendered + */ + sink_recalculate_max_request_and_rewind(s); + } + } + + return 0; +} + +/* Called from the IO thread. 
*/ +static void sink_update_requested_latency_cb(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + u->block_usec = pa_sink_get_requested_latency_within_thread(s); + + if (u->block_usec == (pa_usec_t) -1) + u->block_usec = s->thread_info.max_latency; + + sink_recalculate_max_request_and_rewind(s); +} + +static void sink_reconfigure_cb(pa_sink *s, pa_sample_spec *spec, bool passthrough) { + /* We don't need to do anything */ + s->sample_spec = *spec; +} + +static bool sink_set_formats_cb(pa_sink *s, pa_idxset *formats) { + struct userdata *u = s->userdata; + + pa_assert(u); + + pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free); + u->formats = pa_idxset_copy(formats, (pa_copy_func_t) pa_format_info_copy); + + return true; +} + +static pa_idxset* sink_get_formats_cb(pa_sink *s) { + struct userdata *u = s->userdata; + + pa_assert(u); + + return pa_idxset_copy(u->formats, (pa_copy_func_t) pa_format_info_copy); +} + +static void process_rewind(struct userdata *u, pa_usec_t now) { + size_t rewind_nbytes, in_buffer; + pa_usec_t delay; + + pa_assert(u); + + rewind_nbytes = u->sink->thread_info.rewind_nbytes; + + if (!PA_SINK_IS_OPENED(u->sink->thread_info.state) || rewind_nbytes <= 0) + goto do_nothing; + + pa_log_debug("Requested to rewind %lu bytes.", (unsigned long) rewind_nbytes); + + if (u->timestamp <= now) + goto do_nothing; + + delay = u->timestamp - now; + in_buffer = pa_usec_to_bytes(delay, &u->sink->sample_spec); + + if (in_buffer <= 0) + goto do_nothing; + + if (rewind_nbytes > in_buffer) + rewind_nbytes = in_buffer; + + pa_sink_process_rewind(u->sink, rewind_nbytes); + u->timestamp -= pa_bytes_to_usec(rewind_nbytes, &u->sink->sample_spec); + + pa_log_debug("Rewound %lu bytes.", (unsigned long) rewind_nbytes); + return; + +do_nothing: + + pa_sink_process_rewind(u->sink, 0); +} + +static void process_render(struct userdata *u, pa_usec_t now) { + size_t ate = 0; + + pa_assert(u); + + /* This is the configured latency. Sink inputs connected to us + might not have a single frame more than the maxrequest value + queued. Hence: at maximum read this many bytes from the sink + inputs. 
*/ + + /* Fill the buffer up the latency size */ + while (u->timestamp < now + u->block_usec) { + pa_memchunk chunk; + size_t request_size; + + request_size = pa_usec_to_bytes(now + u->block_usec - u->timestamp, &u->sink->sample_spec); + request_size = PA_MIN(request_size, u->sink->thread_info.max_request); + pa_sink_render(u->sink, request_size, &chunk); + + pa_memblock_unref(chunk.memblock); + +/* pa_log_debug("Ate %lu bytes.", (unsigned long) chunk.length); */ + u->timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec); + + ate += chunk.length; + + if (ate >= u->sink->thread_info.max_request) + break; + } + +/* pa_log_debug("Ate in sum %lu bytes (of %lu)", (unsigned long) ate, (unsigned long) nbytes); */ +} + +static void thread_func(void *userdata) { + struct userdata *u = userdata; + + pa_assert(u); + + pa_log_debug("Thread starting up"); + + if (u->core->realtime_scheduling) + pa_thread_make_realtime(u->core->realtime_priority); + + pa_thread_mq_install(&u->thread_mq); + + u->timestamp = pa_rtclock_now(); + + for (;;) { + pa_usec_t now = 0; + int ret; + + if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) + now = pa_rtclock_now(); + + if (PA_UNLIKELY(u->sink->thread_info.rewind_requested)) + process_rewind(u, now); + + /* Render some data and drop it immediately */ + if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) { + if (u->timestamp <= now) + process_render(u, now); + + pa_rtpoll_set_timer_absolute(u->rtpoll, u->timestamp); + } else + pa_rtpoll_set_timer_disabled(u->rtpoll); + + /* Hmm, nothing to do. Let's sleep */ + if ((ret = pa_rtpoll_run(u->rtpoll)) < 0) + goto fail; + + if (ret == 0) + goto finish; + } + +fail: + /* If this was no regular exit from the loop we have to continue + * processing messages until we received PA_MESSAGE_SHUTDOWN */ + pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL); + pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN); + +finish: + pa_log_debug("Thread shutting down"); +} + +int pa__init(pa_module*m) { + struct userdata *u = NULL; + pa_sample_spec ss; + pa_channel_map map; + pa_modargs *ma = NULL; + pa_sink_new_data data; + pa_format_info *format; + const char *formats; + size_t nbytes; + + pa_assert(m); + + if (!(ma = pa_modargs_new(m->argument, valid_modargs))) { + pa_log("Failed to parse module arguments."); + goto fail; + } + + ss = m->core->default_sample_spec; + map = m->core->default_channel_map; + if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) { + pa_log("Invalid sample format specification or channel map"); + goto fail; + } + + m->userdata = u = pa_xnew0(struct userdata, 1); + u->core = m->core; + u->module = m; + u->rtpoll = pa_rtpoll_new(); + u->block_usec = BLOCK_USEC; + + if (pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll) < 0) { + pa_log("pa_thread_mq_init() failed."); + goto fail; + } + + pa_sink_new_data_init(&data); + data.driver = __FILE__; + data.module = m; + pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME)); + pa_sink_new_data_set_sample_spec(&data, &ss); + pa_sink_new_data_set_channel_map(&data, &map); + pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, _("Null Output")); + pa_proplist_sets(data.proplist, PA_PROP_DEVICE_CLASS, "abstract"); + + u->formats = pa_idxset_new(NULL, NULL); + if ((formats = pa_modargs_get_value(ma, "formats", NULL))) { + char *f = NULL; + const char *state = NULL; + + while ((f = pa_split(formats, ";", 
&state))) { + format = pa_format_info_from_string(pa_strip(f)); + + if (!format) { + pa_log(_("Failed to set format: invalid format string %s"), f); + pa_xfree(f); + goto fail; + } + pa_xfree(f); + + pa_idxset_put(u->formats, format, NULL); + } + } else { + format = pa_format_info_new(); + format->encoding = PA_ENCODING_PCM; + pa_idxset_put(u->formats, format, NULL); + } + + if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) { + pa_log("Invalid properties"); + pa_sink_new_data_done(&data); + goto fail; + } + + u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY | PA_SINK_SET_FORMATS); + pa_sink_new_data_done(&data); + + if (!u->sink) { + pa_log("Failed to create sink object."); + goto fail; + } + + u->sink->parent.process_msg = sink_process_msg; + u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb; + u->sink->update_requested_latency = sink_update_requested_latency_cb; + u->sink->reconfigure = sink_reconfigure_cb; + u->sink->get_formats = sink_get_formats_cb; + u->sink->set_formats = sink_set_formats_cb; + u->sink->userdata = u; + + pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq); + pa_sink_set_rtpoll(u->sink, u->rtpoll); + + if(pa_modargs_get_value_boolean(ma, "norewinds", &u->norewinds) < 0){ + pa_log("Invalid argument, norewinds expects a boolean value."); + } + + if (u->norewinds) + u->block_usec = BLOCK_USEC_NOREWINDS; + + nbytes = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec); + + if(u->norewinds){ + pa_sink_set_max_rewind(u->sink, 0); + } else { + pa_sink_set_max_rewind(u->sink, nbytes); + } + + pa_sink_set_max_request(u->sink, nbytes); + + if (!(u->thread = pa_thread_new("null-sink", thread_func, u))) { + pa_log("Failed to create thread."); + goto fail; + } + + pa_sink_set_latency_range(u->sink, 0, u->block_usec); + + // pa_module_hook_connect(m, &m->core->hooks[PA_CORE_HOOK_SINK_INPUT_PUT], PA_HOOK_LATE, (pa_hook_cb_t) sink_input_put_cb, u); + pa_module_hook_connect(m, &m->core->hooks[PA_CORE_HOOK_SINK_INPUT_PROPLIST_CHANGED], PA_HOOK_LATE, (pa_hook_cb_t) sink_input_put_cb_proplist_changed, u); + + pa_sink_put(u->sink); + + pa_modargs_free(ma); + + return 0; + +fail: + if (ma) + pa_modargs_free(ma); + + pa__done(m); + + return -1; +} + +int pa__get_n_used(pa_module *m) { + struct userdata *u; + + pa_assert(m); + pa_assert_se(u = m->userdata); + + return pa_sink_linked_by(u->sink); +} + +void pa__done(pa_module*m) { + struct userdata *u; + + pa_assert(m); + + if (!(u = m->userdata)) + return; + + if (u->sink) + pa_sink_unlink(u->sink); + + if (u->thread) { + pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL); + pa_thread_free(u->thread); + } + + pa_thread_mq_done(&u->thread_mq); + + if (u->sink) + pa_sink_unref(u->sink); + + if (u->rtpoll) + pa_rtpoll_free(u->rtpoll); + + if (u->formats) + pa_idxset_free(u->formats, (pa_free_cb_t) pa_format_info_free); + + pa_xfree(u); +} diff --git a/interfaces/inner_api/native/audiocommon/include/audio_effect.h b/interfaces/inner_api/native/audiocommon/include/audio_effect.h new file mode 100644 index 0000000000000000000000000000000000000000..eca48abb4c5c269a20be2487762d187784089d55 --- /dev/null +++ b/interfaces/inner_api/native/audiocommon/include/audio_effect.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include "audio_info.h" + +#ifndef AUDIO_FRAMEWORK_AUDIO_EFFECT_H +#define AUDIO_FRAMEWORK_AUDIO_EFFECT_H + +#define AUDIO_EFFECT_LIBRARY_INFO_SYM AELI +#define AUDIO_EFFECT_LIBRARY_INFO_SYM_AS_STR "AELI" +#define EFFECT_STRING_LEN_MAX 64 + +namespace OHOS { +namespace AudioStandard { +// audio effect manager info +constexpr int32_t AUDIO_EFFECT_COUNT_UPPER_LIMIT = 20; +constexpr int32_t AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT = 1; + +struct Library { + std::string name; + std::string path; +}; + +struct Effect { + std::string name; + std::string libraryName; +}; + +struct EffectChain { + std::string name; + std::vector apply; +}; + +struct Device { + std::string type; + std::string chain; +}; + +struct Preprocess { + std::string stream; + std::vector mode; + std::vector> device; +}; + +struct Postprocess { + std::string stream; + std::vector mode; + std::vector> device; +}; + +struct OriginalEffectConfig { + float version; + std::vector libraries; + std::vector effects; + std::vector effectChains; + std::vector preProcess; + std::vector postProcess; +}; + +struct StreamEffectMode { + std::string mode; + std::vector devicePort; +}; + +struct Stream { + std::string scene; + std::vector streamEffectMode; +}; + +struct ProcessNew { + std::vector stream; +}; + +struct SupportedEffectConfig { + std::vector effectChains; + ProcessNew preProcessNew; + ProcessNew postProcessNew; +}; + +typedef struct EffectInterfaceS **EffectHandleT; + +typedef struct AudioBufferS { + size_t frameCount; // number of frames in buffer + union { + void *raw; // raw pointer to start of buffer + float *f32; // pointer to float 32 bit data at start of buffer + int32_t *s32; // pointer to signed 32 bit data at start of buffer + int16_t *s16; // pointer to signed 16 bit data at start of buffer + uint8_t *u8; // pointer to unsigned 8 bit data at start of buffer + }; +} AudioBufferT; + +// for initial version +typedef struct EffectDescriptorS { + std::string type; + std::string id; + uint32_t apiVersion; + uint32_t flags; + uint16_t cpuLoad; + uint16_t memoryUsage; + char name[EFFECT_STRING_LEN_MAX]; + char implementor[EFFECT_STRING_LEN_MAX]; +} EffectDescriptorT; + +struct EffectInterfaceS { + int32_t (*Process)(EffectHandleT self, AudioBufferT *inBuffer, AudioBufferT *outBuffer); + + int32_t + (*Command)(EffectHandleT self, uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, + void *pReplyData); + + int32_t (*GetDescriptor)(EffectHandleT self, EffectDescriptorT *pDescriptor); + + int32_t (*ProcessReverse)(EffectHandleT self, AudioBufferT *inBuffer, AudioBufferT *outBuffer); +}; + +// for initial version +typedef struct AudioEffectLibraryS { + uint32_t tag; + uint32_t version; + const char *name; + const char *implementor; + + int32_t (*CreateEffect)(const std::string *id, int32_t sessionId, int32_t ioId, EffectHandleT *pHandle); + + int32_t (*ReleaseEffect)(EffectHandleT handle); + + int32_t (*GetDescriptor)(const std::string *id, EffectDescriptorT *pDescriptor); +} AudioEffectLibraryT; + +typedef struct 
LibEntryS { + AudioEffectLibraryT *desc; + std::string name; + std::string path; + void *handle; + std::vector > effects; +} LibEntryT; + +} // namespce AudioStandard +} // namespace OHOS + +#endif //AUDIO_FRAMEWORK_AUDIO_EFFECT_H \ No newline at end of file diff --git a/interfaces/inner_api/native/audiocommon/include/audio_info.h b/interfaces/inner_api/native/audiocommon/include/audio_info.h index 18a9f84c3a12b8c1aff88bd2ed2f13b494152783..433bb97b50a3e4f66c7af3c34fdfe507e24392cc 100644 --- a/interfaces/inner_api/native/audiocommon/include/audio_info.h +++ b/interfaces/inner_api/native/audiocommon/include/audio_info.h @@ -520,6 +520,26 @@ enum AudioFocuState { STOP }; +/** +* Enumerates the audio scene effect type. +*/ +enum AudioEffectScene { + SCENE_OTHERS = 0, + SCENE_MUSIC = 1, + SCENE_MOVIE = 2, + SCENE_GAME = 3, + SCENE_SPEECH = 4, + SCENE_RING = 5 +}; + +/** +* Enumerates the audio scene effct mode. +*/ +enum AudioEffectMode { + EFFECT_NONE = 0, + EFFECT_DEFAULT = 1 +}; + struct InterruptEvent { /** * Interrupt event type, begin or end @@ -888,6 +908,10 @@ struct StreamSetStateEventInternal { AudioStreamType audioStreamType; }; +struct AudioSceneEffectInfo { + std::vector mode; +}; + struct AudioRendererChangeInfo { int32_t clientUID; int32_t sessionId; @@ -966,53 +990,6 @@ enum StateChangeCmdType { CMD_FROM_SYSTEM = 1 }; -// audio effect manager info -constexpr int32_t AUDIO_EFFECT_COUNT_UPPER_LIMIT = 20; -constexpr int32_t AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT = 1; - -struct Library { - std::string name; - std::string path; -}; - -struct Effect { - std::string name; - std::string libraryName; - std::string effectId; -}; - -struct EffectChain { - std::string name; - std::vector apply; -}; - -struct Device { - std::string type; - std::string address; - std::string chain; -}; - -struct Preprocess { - std::string stream; - std::vector mode; - std::vector> device; -}; - -struct Postprocess { - std::string stream; - std::vector mode; - std::vector> device; -}; - -struct OriginalEffectConfig { - float version; - std::vector libraries; - std::vector effects; - std::vector effectChains; - std::vector preprocess; - std::vector postprocess; -}; - class AudioRendererPolicyServiceDiedCallback { public: virtual ~AudioRendererPolicyServiceDiedCallback() = default; diff --git a/interfaces/inner_api/native/audiomanager/include/audio_stream_manager.h b/interfaces/inner_api/native/audiomanager/include/audio_stream_manager.h index ba3866d6e59feaf4a67762c480b538abe19024a2..06c019f9be803c7c928c93467bafa160d9c9f10b 100644 --- a/interfaces/inner_api/native/audiomanager/include/audio_stream_manager.h +++ b/interfaces/inner_api/native/audiomanager/include/audio_stream_manager.h @@ -17,7 +17,7 @@ #define ST_AUDIO_STREAM_MANAGER_H #include - +#include #include "audio_info.h" namespace OHOS { @@ -165,6 +165,22 @@ public: * @since 9 */ bool IsAudioRendererLowLatencySupported(const AudioStreamInfo &audioStreamInfo); + + /** + * @brief Get Audio Effect Infos. + * + * @param AudioSceneEffectInfo AudioSceneEffectInfo + * @return Returns {@link SUCCESS} if callback registration is successful; returns an error code + * defined in {@link audio_errors.h} otherwise. 
+ * @since 9 + */ + int32_t GetEffectInfoArray(AudioSceneEffectInfo &audioSceneEffectInfo, + ContentType contentType, StreamUsage streamUsage); +}; + +static const std::map effectModeMap = { + {"EFFECT_NONE", EFFECT_NONE}, + {"EFFECT_DEFAULT", EFFECT_DEFAULT} }; } // namespace AudioStandard } // namespace OHOS diff --git a/interfaces/inner_api/native/audiorenderer/include/audio_renderer.h b/interfaces/inner_api/native/audiorenderer/include/audio_renderer.h index 467b167e0cfb661e2ef983a25dd7af9d89c8d3d1..7e385cc77d0a3b615d4ff6831cc08f48a4e91f00 100644 --- a/interfaces/inner_api/native/audiorenderer/include/audio_renderer.h +++ b/interfaces/inner_api/native/audiorenderer/include/audio_renderer.h @@ -685,6 +685,24 @@ public: */ virtual int32_t GetCurrentOutputDevices(DeviceInfo &deviceInfo) const = 0; + /** + * @brief Gets the audio effect mode. + * + * @return Returns current audio effect mode. + * @since 10 + */ + virtual AudioEffectMode GetAudioEffectMode() const = 0; + + /** + * @brief Sets the audio effect mode. + * + * * @param effectMode The audio effect mode at which the stream needs to be rendered. + * @return Returns {@link SUCCESS} if audio effect mode is successfully set; returns an error code + * defined in {@link audio_errors.h} otherwise. + * @since 10 + */ + virtual int32_t SetAudioEffectMode(AudioEffectMode effectMode) const = 0; + /** * @brief Registers the renderer event callback listener. * diff --git a/interfaces/kits/js/audio_manager/include/audio_stream_mgr_napi.h b/interfaces/kits/js/audio_manager/include/audio_stream_mgr_napi.h index 164d60f910b59fb39b74b7d6fad3ebec4ea2462c..d5adcb726d9c9d22c5f2235ee1bb2127c2daf9d5 100644 --- a/interfaces/kits/js/audio_manager/include/audio_stream_mgr_napi.h +++ b/interfaces/kits/js/audio_manager/include/audio_stream_mgr_napi.h @@ -16,6 +16,7 @@ #ifndef AUDIO_STREAM_MGR_NAPI_H_ #define AUDIO_STREAM_MGR_NAPI_H_ +#include "audio_errors.h" #include "audio_renderer.h" #include "napi/native_api.h" #include "napi/native_node_api.h" @@ -36,6 +37,25 @@ public: static napi_value GetStreamManager(napi_env env, napi_callback_info info); private: + struct AudioStreamMgrAsyncContext { + napi_env env; + napi_async_work work; + napi_deferred deferred; + napi_ref callbackRef = nullptr; + int32_t status = SUCCESS; + int32_t volType; + int32_t contentType; + int32_t streamUsage; + bool isTrue; + bool isLowLatencySupported; + bool isActive; + AudioStreamInfo audioStreamInfo; + AudioStreamMgrNapi *objectInfo; + std::vector> audioRendererChangeInfos; + std::vector> audioCapturerChangeInfos; + AudioSceneEffectInfo audioSceneEffectInfo; + }; + static napi_value GetCurrentAudioRendererInfos(napi_env env, napi_callback_info info); static napi_value GetCurrentAudioCapturerInfos(napi_env env, napi_callback_info info); static napi_value On(napi_env env, napi_callback_info info); @@ -53,6 +73,13 @@ private: static void Destructor(napi_env env, void *nativeObject, void *finalize_hint); static bool ParseAudioStreamInfo(napi_env env, napi_value root, AudioStreamInfo &audioStreamInfo); static void IsLowLatencySupportedCallback(napi_env env, napi_status status, void *data); + static napi_value GetEffectInfoArray(napi_env env, napi_callback_info info); + static void GetCurrentCapturerChangeInfosCallbackComplete(napi_env env, napi_status status, void *data); + static void IsTrueAsyncCallbackComplete(napi_env env, napi_status status, void *data); + static void GetEffectInfoArrayCallbackComplete(napi_env env, napi_status status, void *data); + static void 
GetCurrentRendererChangeInfosCallbackComplete(napi_env env, napi_status status, void *data); + static void CommonCallbackRoutine(napi_env env, AudioStreamMgrAsyncContext* &asyncContext, + const napi_value &valueParam); napi_env env_; AudioStreamManager *audioStreamMngr_; AudioSystemManager *audioMngr_; diff --git a/interfaces/kits/js/audio_renderer/include/audio_renderer_napi.h b/interfaces/kits/js/audio_renderer/include/audio_renderer_napi.h index a4d3de76fe62063ab187a2f65d366855bcc6266c..1eb421b7d92b87065554be86c80115c335d08c51 100644 --- a/interfaces/kits/js/audio_renderer/include/audio_renderer_napi.h +++ b/interfaces/kits/js/audio_renderer/include/audio_renderer_napi.h @@ -77,6 +77,7 @@ private: size_t totalBytesWritten; uint32_t underflowCount; void *data; + int32_t audioEffectMode; AudioSampleFormat sampleFormat; AudioSamplingRate samplingRate; AudioChannel channelCount; @@ -118,6 +119,8 @@ private: static napi_value GetMaxStreamVolume(napi_env env, napi_callback_info info); static napi_value GetCurrentOutputDevices(napi_env env, napi_callback_info info); static napi_value GetUnderflowCount(napi_env env, napi_callback_info info); + static napi_value GetAudioEffectMode(napi_env env, napi_callback_info info); + static napi_value SetAudioEffectMode(napi_env env, napi_callback_info info); static void JudgeFuncDrain(napi_env &env, napi_value &result, std::unique_ptr &asyncContext); @@ -172,6 +175,7 @@ private: static napi_value CreateInterruptHintTypeObject(napi_env env); static napi_value CreateAudioStateObject(napi_env env); static napi_value CreateAudioSampleFormatObject(napi_env env); + static napi_value CreateAudioEffectModeObject(napi_env env); static void RegisterRendererDeviceChangeCallback(napi_env env, napi_value* args, AudioRendererNapi *rendererNapi); static void UnregisterRendererDeviceChangeCallback(napi_env env, size_t argc, napi_value* args, AudioRendererNapi *rendererNapi); @@ -181,6 +185,7 @@ private: static napi_ref interruptHintType_; static napi_ref audioState_; static napi_ref sampleFormat_; + static napi_ref audioEffectMode_; static std::unique_ptr sAudioParameters_; static std::unique_ptr sRendererOptions_; static std::mutex createMutex_; diff --git a/services/audio_policy/client/include/audio_policy_base.h b/services/audio_policy/client/include/audio_policy_base.h index 24f27eb43a8403ab1aa2cfa49cde7de2bded3ef9..f1862b4153cbb2f8d8d7650251c6959a749985f6 100644 --- a/services/audio_policy/client/include/audio_policy_base.h +++ b/services/audio_policy/client/include/audio_policy_base.h @@ -24,6 +24,7 @@ #include "iremote_proxy.h" #include "iremote_stub.h" #include "audio_system_manager.h" +#include "audio_effect.h" namespace OHOS { namespace AudioStandard { @@ -186,6 +187,8 @@ public: virtual float GetMinStreamVolume(void) = 0; virtual float GetMaxStreamVolume(void) = 0; + + virtual int32_t QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) = 0; public: DECLARE_INTERFACE_DESCRIPTOR(u"IAudioPolicy"); }; diff --git a/services/audio_policy/client/include/audio_policy_manager_stub.h b/services/audio_policy/client/include/audio_policy_manager_stub.h index 8cdf0760c88ba99edc49eacf01ea527f08242143..821f296c9734401131ced5e5c7475f984d730d29 100644 --- a/services/audio_policy/client/include/audio_policy_manager_stub.h +++ b/services/audio_policy/client/include/audio_policy_manager_stub.h @@ -104,6 +104,7 @@ private: void GetSystemSoundUriInternal(MessageParcel &data, MessageParcel &reply); void GetMinStreamVolumeInternal(MessageParcel &data, MessageParcel 
&reply); void GetMaxStreamVolumeInternal(MessageParcel &data, MessageParcel &reply); + void QueryEffectSceneModeInternal(MessageParcel &data, MessageParcel &reply); }; } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/client/include/audio_policy_proxy.h b/services/audio_policy/client/include/audio_policy_proxy.h index d8cd4e7b08991d5c77b7a8c930775f41d29b16a6..de4b056ca805226963fb03577bb3c0fe55a36eb2 100644 --- a/services/audio_policy/client/include/audio_policy_proxy.h +++ b/services/audio_policy/client/include/audio_policy_proxy.h @@ -182,6 +182,8 @@ public: float GetMinStreamVolume(void) override; float GetMaxStreamVolume(void) override; + + int32_t QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) override; private: static inline BrokerDelegator mDdelegator; void WriteAudioInteruptParams(MessageParcel &parcel, const AudioInterrupt &audioInterrupt); diff --git a/services/audio_policy/client/src/audio_policy_manager.cpp b/services/audio_policy/client/src/audio_policy_manager.cpp index ba03dff0e2e070f5f6bb4a3946f5e1bfbd701037..3319a31d0f6fb7f2024e24687e2b691bfcc8f728 100644 --- a/services/audio_policy/client/src/audio_policy_manager.cpp +++ b/services/audio_policy/client/src/audio_policy_manager.cpp @@ -1071,5 +1071,16 @@ int32_t AudioPolicyManager::UnregisterAudioPolicyServerDiedCb(const int32_t clie } return SUCCESS; } + +int32_t AudioPolicyManager::QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) +{ + const sptr gsp = GetAudioPolicyManagerProxy(); + if (gsp == nullptr) { + AUDIO_ERR_LOG("QueryEffectSceneMode: audio policy manager proxy is NULL."); + return -1; + } + int error = gsp->QueryEffectSceneMode(supportedEffectConfig); // audio_policy_proxy + return error; +} } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/client/src/audio_policy_proxy.cpp b/services/audio_policy/client/src/audio_policy_proxy.cpp index c8eebeb96de921654b509d2931334fd1a74b5d68..93346a520bf4001c9c5af9ead43da7d5854c9a38 100644 --- a/services/audio_policy/client/src/audio_policy_proxy.cpp +++ b/services/audio_policy/client/src/audio_policy_proxy.cpp @@ -1780,5 +1780,153 @@ float AudioPolicyProxy::GetMaxStreamVolume() } return reply.ReadFloat(); } + +static void EffectChainApplyProcess(EffectChain &tmp, MessageParcel &reply, int countApply) +{ + int j; + for (j = 0; j < countApply; j++) { + string ECapply = reply.ReadString(); + tmp.apply.push_back(ECapply); + } +} + +static void EffectChainProcess(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply) +{ + EffectChain tmp; + string ECname = reply.ReadString(); + tmp.name = ECname; + int countApply = reply.ReadInt32(); + if (countApply > 0) { + EffectChainApplyProcess(tmp, reply, countApply); + } + supportedEffectConfig.effectChains.push_back(tmp); +} + +static void PreprocessMode(Stream &stream, MessageParcel &reply, int countMode) +{ + int j, k; + for (j = 0; j < countMode; j++) { + StreamEffectMode streamEffectMode; + streamEffectMode.mode = reply.ReadString(); + int countDev = reply.ReadInt32(); + if (countDev > 0) { + for (k = 0; k < countDev; k++) { + string type = reply.ReadString(); + string chain = reply.ReadString(); + streamEffectMode.devicePort.push_back({type, chain}); + } + } + stream.streamEffectMode.push_back(streamEffectMode); + } +} + +static Stream PreprocessProcess(MessageParcel &reply) +{ + Stream stream; + stream.scene = reply.ReadString(); + int countMode = reply.ReadInt32(); + if (countMode > 0) { + PreprocessMode(stream, 
reply, countMode); + } + return stream; +} + +static void PostprocessMode(Stream &stream, MessageParcel &reply, int countMode) +{ + int j, k; + for (j = 0; j < countMode; j++) { + StreamEffectMode streamEffectMode; + streamEffectMode.mode = reply.ReadString(); + int countDev = reply.ReadInt32(); + if (countDev > 0) { + for (k = 0; k < countDev; k++) { + string type = reply.ReadString(); + string chain = reply.ReadString(); + streamEffectMode.devicePort.push_back({type, chain}); + } + } + stream.streamEffectMode.push_back(streamEffectMode); + } +} + +static Stream PostprocessProcess(MessageParcel &reply) +{ + Stream stream; + stream.scene = reply.ReadString(); + int countMode = reply.ReadInt32(); + if (countMode > 0) { + PostprocessMode(stream, reply, countMode); + } + return stream; +} + +static int32_t QueryEffectSceneModeChkReply(int countEC, int countPre, int countPost) +{ + if ((countEC < 0) || (countEC > AUDIO_EFFECT_COUNT_UPPER_LIMIT)) { + AUDIO_ERR_LOG("QUERY_EFFECT_SCENEMODE read replyParcel failed"); + return -1; + } + if ((countPre < 0) || (countPre > AUDIO_EFFECT_COUNT_UPPER_LIMIT)) { + AUDIO_ERR_LOG("QUERY_EFFECT_SCENEMODE read replyParcel failed"); + return -1; + } + if ((countPost < 0) || (countPost > AUDIO_EFFECT_COUNT_UPPER_LIMIT)) { + AUDIO_ERR_LOG("QUERY_EFFECT_SCENEMODE read replyParcel failed"); + return -1; + } + return 0; +} + +int32_t AudioPolicyProxy::QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) +{ + int i; + int32_t error; + MessageParcel data; + MessageParcel reply; + MessageOption option; + if (!data.WriteInterfaceToken(GetDescriptor())) { + AUDIO_ERR_LOG("QueryEffectSceneMode: WriteInterfaceToken failed"); + return -1; + } + error = Remote()->SendRequest(QUERY_EFFECT_SCENEMODE, data, reply, option); + if (error != ERR_NONE) { + AUDIO_ERR_LOG("get scene & mode failed, error: %d", error); + return error; + } + int countEC = reply.ReadInt32(); + int countPre = reply.ReadInt32(); + int countPost = reply.ReadInt32(); + error = QueryEffectSceneModeChkReply(countEC, countPre, countPost); + if (error != ERR_NONE) { + AUDIO_ERR_LOG("get scene & mode failed, error: %d", error); + return error; + } + // effectChain + if (countEC > 0) { + for (i = 0; i < countEC; i++) { + EffectChainProcess(supportedEffectConfig, reply); + } + } + // preprocess + Stream stream; + if (countPre > 0) { + ProcessNew preProcessNew; + for (i = 0; i < countPre; i++) { + stream = PreprocessProcess(reply); + preProcessNew.stream.push_back(stream); + } + supportedEffectConfig.preProcessNew = preProcessNew; + } + // postprocess + if (countPost > 0) { + ProcessNew postProcessNew; + for (i = 0; i < countPost; i++) { + stream = PostprocessProcess(reply); + postProcessNew.stream.push_back(stream); + } + supportedEffectConfig.postProcessNew = postProcessNew; + } + return 0; +} } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/common/include/audio_policy_types.h b/services/audio_policy/common/include/audio_policy_types.h index 7d8373f8c7f1e0bd5ef814d57b33e37b66a3d31d..af1faafe0d34b126bc4b300dcb74419760232a1e 100644 --- a/services/audio_policy/common/include/audio_policy_types.h +++ b/services/audio_policy/common/include/audio_policy_types.h @@ -91,6 +91,7 @@ enum AudioPolicyCommand { GET_SYSTEM_SOUND_URI, GET_MIN_VOLUME_STREAM, GET_MAX_VOLUME_STREAM, + QUERY_EFFECT_SCENEMODE, }; } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/server/config/audio_effect_config.xml 
b/services/audio_policy/server/config/audio_effect_config.xml index af4dabf81722a73478b46972ae42550d7eabfcd0..8fc80628d3521ceee95f245d0d81ae726fac545a 100644 --- a/services/audio_policy/server/config/audio_effect_config.xml +++ b/services/audio_policy/server/config/audio_effect_config.xml @@ -15,13 +15,13 @@ --> - + - - + + @@ -34,21 +34,21 @@ - - + + - + - - - - + + + + - - - - + + + + - + \ No newline at end of file diff --git a/services/audio_policy/server/include/audio_policy_server.h b/services/audio_policy/server/include/audio_policy_server.h index a9c5ec823d1ec53623a435573d3bf2781effd771..79f24d9a2393c41f0ef80feba793589eb89e92cc 100644 --- a/services/audio_policy/server/include/audio_policy_server.h +++ b/services/audio_policy/server/include/audio_policy_server.h @@ -223,6 +223,8 @@ public: float GetMinStreamVolume(void) override; float GetMaxStreamVolume(void) override; + + int32_t QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) override; class RemoteParameterCallback : public AudioParameterCallback { public: RemoteParameterCallback(sptr server); diff --git a/services/audio_policy/server/include/service/audio_policy_service.h b/services/audio_policy/server/include/service/audio_policy_service.h index 9851d08ddb92591ad37b5bebc027955a6884951b..f5dc0ec57da2c4127bbebfb19f2b1ffb56a07949 100644 --- a/services/audio_policy/server/include/service/audio_policy_service.h +++ b/services/audio_policy/server/include/service/audio_policy_service.h @@ -233,6 +233,8 @@ public: float GetMaxStreamVolume(void); + int32_t QueryEffectManagerSceneMode(SupportedEffectConfig &supportedEffectConfig); + private: AudioPolicyService() :audioPolicyManager_(AudioPolicyManagerFactory::GetAudioPolicyManager()), diff --git a/services/audio_policy/server/include/service/effect/audio_effect_config_parser.h b/services/audio_policy/server/include/service/effect/audio_effect_config_parser.h index bd8fdeb44cfe7e836165008dda681e905eefa480..34c65c1ea0ee067fd3edee7f7dddb8eb867faa79 100644 --- a/services/audio_policy/server/include/service/effect/audio_effect_config_parser.h +++ b/services/audio_policy/server/include/service/effect/audio_effect_config_parser.h @@ -22,7 +22,7 @@ #include #include #include "audio_log.h" -#include "audio_info.h" +#include "audio_effect.h" namespace OHOS { namespace AudioStandard { diff --git a/services/audio_policy/server/include/service/effect/audio_effect_manager.h b/services/audio_policy/server/include/service/effect/audio_effect_manager.h index 1a2bff4ee18ceeb36f13f7a0503f94c947c1b5b2..80105a2252bae40a707272b9a30af8f24c4defaa 100644 --- a/services/audio_policy/server/include/service/effect/audio_effect_manager.h +++ b/services/audio_policy/server/include/service/effect/audio_effect_manager.h @@ -17,6 +17,7 @@ #define ST_AUDIO_EFFECT_MANAGER_H #include "audio_log.h" +#include "audio_effect.h" #include "audio_effect_config_parser.h" namespace OHOS { @@ -34,10 +35,22 @@ public: void GetOriginalEffectConfig(OriginalEffectConfig &oriEffectConfig); void GetAvailableEffects(std::vector &availableEffects); void UpdateAvailableEffects(std::vector &newAvailableEffects); + void GetSupportedEffectConfig(SupportedEffectConfig &supportedEffectConfig); + void GetAvailableAEConfig(); + int32_t QueryEffectManagerSceneMode(SupportedEffectConfig &supportedEffectConfig); private: OriginalEffectConfig oriEffectConfig_; std::vector availableEffects_; + SupportedEffectConfig supportedEffectConfig_; + int32_t existDefault_ = 0; + + void UpdateAvailableAEConfig(OriginalEffectConfig &aeConfig); + 
void UpdateEffectChains(std::vector &availableLayout); + void UpdateDuplicateBypassMode(ProcessNew &preProcessNew); + void UpdateDuplicateMode(ProcessNew &preProcessNew); + void UpdateDuplicateDevice(ProcessNew &preProcessNew); + int32_t UpdateUnavailableEffectChains(std::vector &availableLayout, ProcessNew &preProcessNew); }; } // namespce AudioStandard } // namespace OHOS diff --git a/services/audio_policy/server/include/service/manager/audio_adapter_manager.h b/services/audio_policy/server/include/service/manager/audio_adapter_manager.h index e024110423079da389676d53d6d8ec306131fc36..9c0c408112c665efb107149ba30a365e8ec07f33 100644 --- a/services/audio_policy/server/include/service/manager/audio_adapter_manager.h +++ b/services/audio_policy/server/include/service/manager/audio_adapter_manager.h @@ -35,6 +35,8 @@ public: static constexpr std::string_view HDI_SOURCE = "libmodule-hdi-source.z.so"; static constexpr std::string_view PIPE_SINK = "libmodule-pipe-sink.z.so"; static constexpr std::string_view PIPE_SOURCE = "libmodule-pipe-source.z.so"; + static constexpr std::string_view MIXER_SINK = "libmodule-mixer-sink.z.so"; + static constexpr std::string_view EFFECT_SINK = "libmodule-effect-sink.z.so"; static constexpr uint32_t KVSTORE_CONNECT_RETRY_COUNT = 5; static constexpr uint32_t KVSTORE_CONNECT_RETRY_DELAY_TIME = 200000; static constexpr float MIN_VOLUME = 0.0f; diff --git a/services/audio_policy/server/src/audio_policy_manager_stub.cpp b/services/audio_policy/server/src/audio_policy_manager_stub.cpp index dd2dcea4e69814906124189fee5de1f25225d7d2..f0a8cdccfd7d9ac0c805de64096387adbde35731 100644 --- a/services/audio_policy/server/src/audio_policy_manager_stub.cpp +++ b/services/audio_policy/server/src/audio_policy_manager_stub.cpp @@ -868,6 +868,105 @@ void AudioPolicyManagerStub::GetMaxStreamVolumeInternal(MessageParcel &data, Mes reply.WriteFloat(volume); } +static void EffectChainProcess(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply, int i) +{ + int j; + reply.WriteString(supportedEffectConfig.effectChains[i].name); + int countApply = supportedEffectConfig.effectChains[i].apply.size(); + reply.WriteInt32(countApply); + if (countApply > 0) { + for (j = 0; j < countApply; j++) { + // i th EffectChain's j th apply + reply.WriteString(supportedEffectConfig.effectChains[i].apply[j]); + } + } +} +static void PreprocessMode(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply, int i, int j) +{ + int k; + reply.WriteString(supportedEffectConfig.preProcessNew.stream[i].streamEffectMode[j].mode); + int countDev = supportedEffectConfig.preProcessNew.stream[i].streamEffectMode[j].devicePort.size(); + reply.WriteInt32(countDev); + if (countDev > 0) { + for (k = 0; k < countDev; k++) { + reply.WriteString(supportedEffectConfig.preProcessNew.stream[i].streamEffectMode[j].devicePort[k].type); + reply.WriteString(supportedEffectConfig.preProcessNew.stream[i].streamEffectMode[j].devicePort[k].chain); + } + } +} +static void PreprocessProcess(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply, int i) +{ + int j; + reply.WriteString(supportedEffectConfig.preProcessNew.stream[i].scene); + int countMode = supportedEffectConfig.preProcessNew.stream[i].streamEffectMode.size(); + reply.WriteInt32(countMode); + if (countMode > 0) { + for (j = 0; j < countMode; j++) { + PreprocessMode(supportedEffectConfig, reply, i, j); + } + } +} +static void PostprocessMode(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply, int i, int j) +{ + int k; + 
reply.WriteString(supportedEffectConfig.postProcessNew.stream[i].streamEffectMode[j].mode); + int countDev = supportedEffectConfig.postProcessNew.stream[i].streamEffectMode[j].devicePort.size(); + reply.WriteInt32(countDev); + if (countDev > 0) { + for (k = 0; k < countDev; k++) { + reply.WriteString(supportedEffectConfig.postProcessNew.stream[i].streamEffectMode[j].devicePort[k].type); + reply.WriteString(supportedEffectConfig.postProcessNew.stream[i].streamEffectMode[j].devicePort[k].chain); + } + } +} +static void PostprocessProcess(SupportedEffectConfig &supportedEffectConfig, MessageParcel &reply, int i) +{ + int j; + // i th stream + reply.WriteString(supportedEffectConfig.postProcessNew.stream[i].scene); + int countMode = supportedEffectConfig.postProcessNew.stream[i].streamEffectMode.size(); + reply.WriteInt32(countMode); + if (countMode > 0) { + for (j = 0; j < countMode; j++) { + PostprocessMode(supportedEffectConfig, reply, i, j); + } + } +} + +void AudioPolicyManagerStub::QueryEffectSceneModeInternal(MessageParcel &data, MessageParcel &reply) +{ + int i; + SupportedEffectConfig supportedEffectConfig; + int32_t ret = QueryEffectSceneMode(supportedEffectConfig); // audio_policy_server.cpp + if (ret == -1) { + AUDIO_ERR_LOG("default mode is unavailable !"); + return; + } + + int countEC = supportedEffectConfig.effectChains.size(); + int countPre = supportedEffectConfig.preProcessNew.stream.size(); + int countPost = supportedEffectConfig.postProcessNew.stream.size(); + reply.WriteInt32(countEC); + reply.WriteInt32(countPre); + reply.WriteInt32(countPost); + + if (countEC > 0) { + for (i = 0; i < countEC; i++) { + EffectChainProcess(supportedEffectConfig, reply, i); + } + } + if (countPre > 0) { + for (i = 0; i < countPre; i++) { + PreprocessProcess(supportedEffectConfig, reply, i); + } + } + if (countPost > 0) { + for (i = 0; i < countPost; i++) { + PostprocessProcess(supportedEffectConfig, reply, i); + } + } +} + int AudioPolicyManagerStub::OnRemoteRequest( uint32_t code, MessageParcel &data, MessageParcel &reply, MessageOption &option) { @@ -1155,6 +1254,10 @@ int AudioPolicyManagerStub::OnRemoteRequest( GetMaxStreamVolumeInternal(data, reply); break; + case QUERY_EFFECT_SCENEMODE: + QueryEffectSceneModeInternal(data, reply); + break; + default: AUDIO_ERR_LOG("default case, need check AudioPolicyManagerStub"); return IPCObjectStub::OnRemoteRequest(code, data, reply, option); diff --git a/services/audio_policy/server/src/audio_policy_server.cpp b/services/audio_policy/server/src/audio_policy_server.cpp index 62f0f1292e91db261c0264c2128d114c50b93255..08ca8199a535612e097f230983ffe0387ed877dc 100644 --- a/services/audio_policy/server/src/audio_policy_server.cpp +++ b/services/audio_policy/server/src/audio_policy_server.cpp @@ -1975,5 +1975,11 @@ float AudioPolicyServer::GetMaxStreamVolume() { return mPolicyService.GetMaxStreamVolume(); } + +int32_t AudioPolicyServer::QueryEffectSceneMode(SupportedEffectConfig &supportedEffectConfig) +{ + int32_t ret = mPolicyService.QueryEffectManagerSceneMode(supportedEffectConfig); + return ret; +} } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/server/src/service/audio_policy_service.cpp b/services/audio_policy/server/src/service/audio_policy_service.cpp index e156570e8d79642eb28df5a3e912ca582d3c40df..6b961b90b672cfa690f5e6777c554783473eeb02 100644 --- a/services/audio_policy/server/src/service/audio_policy_service.cpp +++ b/services/audio_policy/server/src/service/audio_policy_service.cpp @@ -1735,7 +1735,51 @@ 
void AudioPolicyService::LoadEffectLibrary() if (!loadSuccess) { AUDIO_ERR_LOG("Load audio effect failed, please check log"); } + audioEffectManager_.UpdateAvailableEffects(successLoadedEffects); + audioEffectManager_.GetAvailableAEConfig(); + + AUDIO_INFO_LOG("LoadEffectLibrary: get supported effect config"); + SupportedEffectConfig supportedEffectConfig; + audioEffectManager_.GetSupportedEffectConfig(supportedEffectConfig); + + AUDIO_INFO_LOG("LoadEffectLibrary: available effectChain count: %{public}zu", supportedEffectConfig.effectChains.size()); + + bool createSuccess = gsp->CreateEffectChainManager(supportedEffectConfig.effectChains); + CHECK_AND_RETURN_LOG(createSuccess, "EffectChainManager create failed"); + + // Create the mixer sink and one effect sink for each scene + AudioModuleInfo moduleInfo = {}; + moduleInfo.lib = "libmodule-mixer-sink.z.so"; + moduleInfo.channels = "2"; + moduleInfo.rate = "48000"; + moduleInfo.format = "s16le"; // 16bit little endian + moduleInfo.name = "MIXER"; + AudioIOHandle ioHandle = audioPolicyManager_.OpenAudioPort(moduleInfo); + CHECK_AND_RETURN_LOG(ioHandle != OPEN_PORT_FAILURE, + "OpenAudioPort failed %{public}d", ioHandle); + IOHandles_[moduleInfo.name] = ioHandle; + + moduleInfo.lib = "libmodule-effect-sink.z.so"; + std::vector allSceneTypes; + allSceneTypes.push_back("SCENE_MUSIC"); + allSceneTypes.push_back("SCENE_MOVIE"); + allSceneTypes.push_back("SCENE_GAME"); + allSceneTypes.push_back("SCENE_SPEECH"); + allSceneTypes.push_back("SCENE_RING"); + allSceneTypes.push_back("SCENE_OTHERS"); + AUDIO_INFO_LOG("LoadEffectLibrary: postProcess stream size: %{public}zu", supportedEffectConfig.postProcessNew.stream.size()); + for (std::string sceneType : allSceneTypes) { + AUDIO_INFO_LOG("LoadEffectLibrary: create effect sink for scene %{public}s", sceneType.c_str()); + moduleInfo.name = sceneType; + ioHandle = audioPolicyManager_.OpenAudioPort(moduleInfo); + CHECK_AND_RETURN_LOG(ioHandle != OPEN_PORT_FAILURE, + "OpenAudioPort failed %{public}d", ioHandle); + IOHandles_[moduleInfo.name] = ioHandle; + } } void AudioPolicyService::GetEffectManagerInfo(OriginalEffectConfig& oriEffectConfig, @@ -2562,5 +2606,12 @@ float AudioPolicyService::GetMaxStreamVolume() { return audioPolicyManager_.GetMaxStreamVolume(); } + +int32_t AudioPolicyService::QueryEffectManagerSceneMode(SupportedEffectConfig& supportedEffectConfig) +{ + int32_t ret = audioEffectManager_.QueryEffectManagerSceneMode(supportedEffectConfig); + return ret; +} + } // namespace AudioStandard } // namespace OHOS diff --git a/services/audio_policy/server/src/service/dump/audio_service_dump.cpp b/services/audio_policy/server/src/service/dump/audio_service_dump.cpp index 5e115f47b1ac92e3cdef1c7be4f63ad22619c1e2..52f0b016e36a842d121f7282128c420af7b1dade 100644 --- a/services/audio_policy/server/src/service/dump/audio_service_dump.cpp +++ b/services/audio_policy/server/src/service/dump/audio_service_dump.cpp @@ -546,22 +546,21 @@ static void EffectManagerInfoDumpPart(string& dumpString, AudioData &audioData_) { int32_t count; // xml -- Preprocess - for (Preprocess x : audioData_.policyData.oriEffectConfig.preprocess) { - AppendFormat(dumpString, "preprocess stream = %s \n", x.stream.c_str()); + for (Preprocess x : audioData_.policyData.oriEffectConfig.preProcess) { + AppendFormat(dumpString, "preProcess stream = %s \n", x.stream.c_str()); count = 0; for (string modeName : x.mode) { count++; AppendFormat(dumpString, " modeName%d = %s \n", count, modeName.c_str()); for (Device deviceInfo : x.device[count - 1]) {
AppendFormat(dumpString, " device type = %s \n", deviceInfo.type.c_str()); - AppendFormat(dumpString, " device address = %s \n", deviceInfo.address.c_str()); AppendFormat(dumpString, " device chain = %s \n", deviceInfo.chain.c_str()); } } } // xml -- Postprocess - for (Postprocess x : audioData_.policyData.oriEffectConfig.postprocess) { + for (Postprocess x : audioData_.policyData.oriEffectConfig.postProcess) { AppendFormat(dumpString, "postprocess stream = %s \n", x.stream.c_str()); count = 0; for (string modeName : x.mode) { @@ -569,7 +568,6 @@ static void EffectManagerInfoDumpPart(string& dumpString, AudioData &audioData_) AppendFormat(dumpString, " modeName%d = %s \n", count, modeName.c_str()); for (Device deviceInfo : x.device[count - 1]) { AppendFormat(dumpString, " device type = %s \n", deviceInfo.type.c_str()); - AppendFormat(dumpString, " device address = %s \n", deviceInfo.address.c_str()); AppendFormat(dumpString, " device chain = %s \n", deviceInfo.chain.c_str()); } } @@ -593,7 +591,6 @@ void AudioServiceDump::EffectManagerInfoDump(string& dumpString) count++; AppendFormat(dumpString, "effect%d name = %s \n", count, x.name.c_str()); AppendFormat(dumpString, "effect%d libraryName = %s \n", count, x.libraryName.c_str()); - AppendFormat(dumpString, "effect%d effectId = %s \n", count, x.effectId.c_str()); } // xml -- effectChain @@ -614,7 +611,6 @@ void AudioServiceDump::EffectManagerInfoDump(string& dumpString) count++; AppendFormat(dumpString, "available Effect%d name = %s \n", count, x.name.c_str()); AppendFormat(dumpString, "available Effect%d libraryName = %s \n", count, x.libraryName.c_str()); - AppendFormat(dumpString, "available Effect%d effectId = %s \n", count, x.effectId.c_str()); } } diff --git a/services/audio_policy/server/src/service/effect/audio_effect_config_parser.cpp b/services/audio_policy/server/src/service/effect/audio_effect_config_parser.cpp index 7cc3d8678758f5d23506e6411c8c118b72c7fa2e..44cc493682f0a2ec2f219771698f21ce00fd34b6 100644 --- a/services/audio_policy/server/src/service/effect/audio_effect_config_parser.cpp +++ b/services/audio_policy/server/src/service/effect/audio_effect_config_parser.cpp @@ -19,7 +19,7 @@ namespace OHOS { namespace AudioStandard { static constexpr char AUDIO_EFFECT_CONFIG_FILE[] = "system/etc/audio/audio_effect_config.xml"; -static const std::string EFFECT_CONFIG_NAME[5] = {"libraries", "effects", "effectChains", "preprocess", "postprocess"}; +static const std::string EFFECT_CONFIG_NAME[5] = {"libraries", "effects", "effectChains", "preProcess", "postProcess"}; static constexpr int32_t FILE_CONTENT_ERROR = -2; static constexpr int32_t FILE_PARSE_ERROR = -3; static constexpr int32_t INDEX_LIBRARIES = 0; @@ -144,16 +144,12 @@ static void LoadEffect(OriginalEffectConfig &result, xmlNode* secondNode) AUDIO_ERR_LOG("missing information: effect has no name attribute"); } else if (!xmlHasProp(currNode, reinterpret_cast("library"))) { AUDIO_ERR_LOG("missing information: effect has no library attribute"); - } else if (!xmlHasProp(currNode, reinterpret_cast("effect_id"))) { - AUDIO_ERR_LOG("missing information: effect has no effect_id attribute"); } else { std::string pEffectName = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("name"))); std::string pEffectLib = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("library"))); - std::string pEffectID = reinterpret_cast - (xmlGetProp(currNode, reinterpret_cast("effect_id"))); - Effect tmp = {pEffectName, pEffectLib, pEffectID}; + Effect tmp = {pEffectName, pEffectLib}; 
result.effects.push_back(tmp); } } else { @@ -280,7 +276,7 @@ static void LoadPreDevice(OriginalEffectConfig &result, const xmlNode* fourthNod const int32_t modeNum, const int32_t streamNum) { if (!fourthNode->xmlChildrenNode) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no child devicePort"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no child devicePort"); return; } int32_t countDevice = 0; @@ -297,19 +293,15 @@ static void LoadPreDevice(OriginalEffectConfig &result, const xmlNode* fourthNod if (!xmlStrcmp(currNode->name, reinterpret_cast("devicePort"))) { if (!xmlHasProp(currNode, reinterpret_cast("type"))) { AUDIO_ERR_LOG("missing information: devicePort has no type attribute"); - } else if (!xmlHasProp(currNode, reinterpret_cast("address"))) { - AUDIO_ERR_LOG("missing information: devicePort has no address attribute"); } else if (!xmlHasProp(currNode, reinterpret_cast("effectChain"))) { AUDIO_ERR_LOG("missing information: devicePort has no effectChain attribute"); } else { std::string pDevType = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("type"))); - std::string pDevAddress = reinterpret_cast - (xmlGetProp(currNode, reinterpret_cast("address"))); std::string pChain = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("effectChain"))); - Device tmpdev = {pDevType, pDevAddress, pChain}; - result.preprocess[streamNum].device[modeNum].push_back(tmpdev); + Device tmpdev = {pDevType, pChain}; + result.preProcess[streamNum].device[modeNum].push_back(tmpdev); } } else { AUDIO_ERR_LOG("wrong name: %{public}s, should be devicePort", currNode->name); @@ -318,14 +310,14 @@ static void LoadPreDevice(OriginalEffectConfig &result, const xmlNode* fourthNod currNode = currNode->next; } if (countDevice == 0) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no child devicePort"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no child devicePort"); } } static void LoadPreMode(OriginalEffectConfig &result, const xmlNode* thirdNode, const int32_t streamNum) { if (!thirdNode->xmlChildrenNode) { - AUDIO_ERR_LOG("missing information: stream has no child streamAE_mode"); + AUDIO_ERR_LOG("missing information: stream has no child streamEffectMode"); return; } int32_t countMode = 0; @@ -333,7 +325,7 @@ static void LoadPreMode(OriginalEffectConfig &result, const xmlNode* thirdNode, xmlNode *currNode = thirdNode->xmlChildrenNode; while (currNode != nullptr) { if (countMode >= AUDIO_EFFECT_COUNT_UPPER_LIMIT) { - AUDIO_ERR_LOG("the number of streamAE_mode nodes exceeds limit: %{public}d", + AUDIO_ERR_LOG("the number of streamEffectMode nodes exceeds limit: %{public}d", AUDIO_EFFECT_COUNT_UPPER_LIMIT); return; } @@ -341,25 +333,25 @@ static void LoadPreMode(OriginalEffectConfig &result, const xmlNode* thirdNode, currNode = currNode->next; continue; } - if (!xmlStrcmp(currNode->name, reinterpret_cast("streamAE_mode"))) { + if (!xmlStrcmp(currNode->name, reinterpret_cast("streamEffectMode"))) { if (!xmlHasProp(currNode, reinterpret_cast("mode"))) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no mode attribute"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no mode attribute"); } else { std::string pStreamAEMode = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("mode"))); - result.preprocess[streamNum].mode.push_back(pStreamAEMode); - result.preprocess[streamNum].device.push_back({}); + result.preProcess[streamNum].mode.push_back(pStreamAEMode); + result.preProcess[streamNum].device.push_back({}); LoadPreDevice(result, 
currNode, modeNum, streamNum); modeNum++; } } else { - AUDIO_ERR_LOG("wrong name: %{public}s, should be streamAE_mode", currNode->name); + AUDIO_ERR_LOG("wrong name: %{public}s, should be streamEffectMode", currNode->name); } countMode++; currNode = currNode->next; } if (countMode == 0) { - AUDIO_ERR_LOG("missing information: stream has no child streamAE_mode"); + AUDIO_ERR_LOG("missing information: stream has no child streamEffectMode"); } } @@ -388,7 +380,7 @@ static void LoadPreProcess(OriginalEffectConfig &result, xmlNode* secondNode) std::string pStreamType = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("scene"))); tmp.stream = pStreamType; - result.preprocess.push_back(tmp); + result.preProcess.push_back(tmp); LoadPreMode(result, currNode, streamNum); streamNum++; } @@ -399,7 +391,7 @@ static void LoadPreProcess(OriginalEffectConfig &result, xmlNode* secondNode) currNode = currNode->next; } if (countPreprocess == 0) { - AUDIO_ERR_LOG("missing information: preprocess has no child stream"); + AUDIO_ERR_LOG("missing information: preProcess has no child stream"); } } @@ -409,14 +401,14 @@ static void LoadEffectConfigPreProcess(OriginalEffectConfig &result, const xmlNo if (countFirstNode[INDEX_PREPROCESS] >= AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT) { if (countFirstNode[INDEX_PREPROCESS] == AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT) { countFirstNode[INDEX_PREPROCESS]++; - AUDIO_ERR_LOG("the number of preprocess nodes exceeds limit: %{public}d", + AUDIO_ERR_LOG("the number of preProcess nodes exceeds limit: %{public}d", AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT); } } else if (currNode->xmlChildrenNode) { LoadPreProcess(result, currNode->xmlChildrenNode); countFirstNode[INDEX_PREPROCESS]++; } else { - AUDIO_ERR_LOG("missing information: preprocess has no child stream"); + AUDIO_ERR_LOG("missing information: preProcess has no child stream"); countFirstNode[INDEX_PREPROCESS]++; } } @@ -425,7 +417,7 @@ static void LoadPostDevice(OriginalEffectConfig &result, const xmlNode* fourthNo const int32_t modeNum, const int32_t streamNum) { if (!fourthNode->xmlChildrenNode) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no child devicePort"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no child devicePort"); return; } int32_t countDevice = 0; @@ -442,19 +434,15 @@ static void LoadPostDevice(OriginalEffectConfig &result, const xmlNode* fourthNo if (!xmlStrcmp(currNode->name, reinterpret_cast("devicePort"))) { if (!xmlHasProp(currNode, reinterpret_cast("type"))) { AUDIO_ERR_LOG("missing information: devicePort has no type attribute"); - } else if (!xmlHasProp(currNode, reinterpret_cast("address"))) { - AUDIO_ERR_LOG("missing information: devicePort has no address attribute"); } else if (!xmlHasProp(currNode, reinterpret_cast("effectChain"))) { AUDIO_ERR_LOG("missing information: devicePort has no effectChain attribute"); } else { std::string pDevType = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("type"))); - std::string pDevAddress = reinterpret_cast - (xmlGetProp(currNode, reinterpret_cast("address"))); std::string pChain = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("effectChain"))); - Device tmpdev = {pDevType, pDevAddress, pChain}; - result.postprocess[streamNum].device[modeNum].push_back(tmpdev); + Device tmpdev = {pDevType, pChain}; + result.postProcess[streamNum].device[modeNum].push_back(tmpdev); } } else { AUDIO_ERR_LOG("wrong name: %{public}s, should be devicePort", currNode->name); @@ -463,14 +451,14 @@ static void 
LoadPostDevice(OriginalEffectConfig &result, const xmlNode* fourthNo currNode = currNode->next; } if (countDevice == 0) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no child devicePort"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no child devicePort"); } } static void LoadPostMode(OriginalEffectConfig &result, const xmlNode* thirdNode, const int32_t streamNum) { if (!thirdNode->xmlChildrenNode) { - AUDIO_ERR_LOG("missing information: stream has no child streamAE_mode"); + AUDIO_ERR_LOG("missing information: stream has no child streamEffectMode"); return; } int32_t countMode = 0; @@ -478,7 +466,7 @@ static void LoadPostMode(OriginalEffectConfig &result, const xmlNode* thirdNode, xmlNode *currNode = thirdNode->xmlChildrenNode; while (currNode != nullptr) { if (countMode >= AUDIO_EFFECT_COUNT_UPPER_LIMIT) { - AUDIO_ERR_LOG("the number of streamAE_mode nodes exceeds limit: %{public}d", + AUDIO_ERR_LOG("the number of streamEffectMode nodes exceeds limit: %{public}d", AUDIO_EFFECT_COUNT_UPPER_LIMIT); return; } @@ -486,25 +474,25 @@ static void LoadPostMode(OriginalEffectConfig &result, const xmlNode* thirdNode, currNode = currNode->next; continue; } - if (!xmlStrcmp(currNode->name, reinterpret_cast("streamAE_mode"))) { + if (!xmlStrcmp(currNode->name, reinterpret_cast("streamEffectMode"))) { if (!xmlHasProp(currNode, reinterpret_cast("mode"))) { - AUDIO_ERR_LOG("missing information: streamAE_mode has no mode attribute"); + AUDIO_ERR_LOG("missing information: streamEffectMode has no mode attribute"); } else { std::string pStreamAEMode = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("mode"))); - result.postprocess[streamNum].mode.push_back(pStreamAEMode); - result.postprocess[streamNum].device.push_back({}); + result.postProcess[streamNum].mode.push_back(pStreamAEMode); + result.postProcess[streamNum].device.push_back({}); LoadPostDevice(result, currNode, modeNum, streamNum); modeNum++; } } else { - AUDIO_ERR_LOG("wrong name: %{public}s, should be streamAE_mode", currNode->name); + AUDIO_ERR_LOG("wrong name: %{public}s, should be streamEffectMode", currNode->name); } countMode++; currNode = currNode->next; } if (countMode == 0) { - AUDIO_ERR_LOG("missing information: stream has no child streamAE_mode"); + AUDIO_ERR_LOG("missing information: stream has no child streamEffectMode"); } } @@ -534,7 +522,7 @@ static void LoadPostProcess(OriginalEffectConfig &result, xmlNode* secondNode) std::string pStreamType = reinterpret_cast (xmlGetProp(currNode, reinterpret_cast("scene"))); tmp.stream = pStreamType; - result.postprocess.push_back(tmp); + result.postProcess.push_back(tmp); LoadPostMode(result, currNode, streamNum); streamNum++; } @@ -545,7 +533,7 @@ static void LoadPostProcess(OriginalEffectConfig &result, xmlNode* secondNode) currNode = currNode->next; } if (countPostprocess == 0) { - AUDIO_ERR_LOG("missing information: postprocess has no child stream"); + AUDIO_ERR_LOG("missing information: postProcess has no child stream"); } } @@ -555,14 +543,14 @@ static void LoadEffectConfigPostProcess(OriginalEffectConfig &result, const xmlN if (countFirstNode[INDEX_POSTPROCESS] >= AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT) { if (countFirstNode[INDEX_POSTPROCESS] == AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT) { countFirstNode[INDEX_POSTPROCESS]++; - AUDIO_ERR_LOG("the number of postprocess nodes exceeds limit: %{public}d", + AUDIO_ERR_LOG("the number of postProcess nodes exceeds limit: %{public}d", AUDIO_EFFECT_COUNT_FIRST_NODE_UPPER_LIMIT); } } else if 
(currNode->xmlChildrenNode) { LoadPostProcess(result, currNode->xmlChildrenNode); countFirstNode[INDEX_POSTPROCESS]++; } else { - AUDIO_ERR_LOG("missing information: postprocess has no child stream"); + AUDIO_ERR_LOG("missing information: postProcess has no child stream"); countFirstNode[INDEX_POSTPROCESS]++; } } @@ -616,9 +604,9 @@ int32_t AudioEffectConfigParser::LoadEffectConfig(OriginalEffectConfig &result) LoadEffectConfigEffects(result, currNode, countFirstNode); } else if (!xmlStrcmp(currNode->name, reinterpret_cast("effectChains"))) { LoadEffectConfigEffectChains(result, currNode, countFirstNode); - } else if (!xmlStrcmp(currNode->name, reinterpret_cast("preprocess"))) { + } else if (!xmlStrcmp(currNode->name, reinterpret_cast("preProcess"))) { LoadEffectConfigPreProcess(result, currNode, countFirstNode); - } else if (!xmlStrcmp(currNode->name, reinterpret_cast("postprocess"))) { + } else if (!xmlStrcmp(currNode->name, reinterpret_cast("postProcess"))) { LoadEffectConfigPostProcess(result, currNode, countFirstNode); } else { LoadEffectConfigException(result, currNode, countFirstNode); diff --git a/services/audio_policy/server/src/service/effect/audio_effect_manager.cpp b/services/audio_policy/server/src/service/effect/audio_effect_manager.cpp index ac76df30a92c7b2bfd42947fd4c6f76fd00946b2..110c9b30097cb2929bd955ca8072f52f0140d771 100644 --- a/services/audio_policy/server/src/service/effect/audio_effect_manager.cpp +++ b/services/audio_policy/server/src/service/effect/audio_effect_manager.cpp @@ -14,6 +14,7 @@ */ #include "audio_effect_manager.h" +#include namespace OHOS { namespace AudioStandard { @@ -51,6 +52,391 @@ void AudioEffectManager::UpdateAvailableEffects(std::vector &newAvailabl { availableEffects_ = newAvailableEffects; } + +int32_t AudioEffectManager::QueryEffectManagerSceneMode(SupportedEffectConfig &supportedEffectConfig) +{ + supportedEffectConfig = supportedEffectConfig_; + return existDefault_; +} + +void AudioEffectManager::GetSupportedEffectConfig(SupportedEffectConfig &supportedEffectConfig) +{ + supportedEffectConfig = supportedEffectConfig_; +} + +static int32_t UpdateUnsupportedScene(std::string &scene) +{ + int isSupported = 0; + if ((scene != "SCENE_MUSIC") && + (scene != "SCENE_MOVIE") && + (scene != "SCENE_GAME") && + (scene != "SCENE_SPEECH") && + (scene != "SCENE_RING") && + (scene != "SCENE_OTHERS")) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG9]:stream-> The scene of %{public}s is unsupported, \ + and this scene is deleted!", scene.c_str()); + isSupported = -1; + } + return isSupported; +} + +static void UpdateUnsupportedDevicePre(Preprocess &pp, Stream &stream, std::string &mode, int i, int j) +{ + StreamEffectMode streamEffectMode; + streamEffectMode.mode = mode; + j = 0; + for (auto &device: pp.device) { + if (i == j) { + for (auto &eachDevice: device) { + streamEffectMode.devicePort.push_back(eachDevice); + } + break; + } + j += 1; + } + i += 1; + stream.streamEffectMode.push_back(streamEffectMode); +} + +static void UpdateUnsupportedModePre(Preprocess &pp, Stream &stream, std::string &mode, int i) +{ + int j = 0; + int isSupported = 0; + if ((mode != "EFFECT_NONE") && + (mode != "EFFECT_DEFAULT")) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG10]:mode-> The %{public}s mode of %{public}s is unsupported, \ + and this mode is deleted!", mode.c_str(), stream.scene.c_str()); + isSupported = -1; + } + if (isSupported == 0) { + UpdateUnsupportedDevicePre(pp, stream, mode, i, j); + } +} + +static void UpdateUnsupportedDevicePost(Postprocess &pp, Stream 
&stream, std::string &mode, int i, int j) +{ + StreamEffectMode streamEffectMode; + streamEffectMode.mode = mode; + j = 0; + for (auto &device: pp.device) { + if (i == j) { + for (auto &a: device) { + streamEffectMode.devicePort.push_back(a); + } + break; + } + j += 1; + } + i += 1; + stream.streamEffectMode.push_back(streamEffectMode); +} + +static void UpdateUnsupportedModePost(Postprocess &pp, Stream &stream, std::string &mode, int i) +{ + int j = 0; + int isSupported = 0; + if ((mode != "EFFECT_NONE") && + (mode != "EFFECT_DEFAULT")) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG10]:mode-> The %{public}s mode of %{public}s is unsupported, \ + and this mode is deleted!", mode.c_str(), stream.scene.c_str()); + isSupported = -1; + } + if (isSupported == 0) { + UpdateUnsupportedDevicePost(pp, stream, mode, i, j); + } +} + +static int32_t UpdateAvailableStreamPre(ProcessNew &preProcessNew, Preprocess &pp) +{ + int i; + int32_t isDuplicate = 0; + int32_t isSupported = UpdateUnsupportedScene(pp.stream); + auto it = std::find_if(preProcessNew.stream.begin(), preProcessNew.stream.end(), [&pp](const Stream& x) { + return x.scene == pp.stream; + }); + if ((it == preProcessNew.stream.end()) && (isSupported == 0)) { + Stream stream; + stream.scene = pp.stream; + i = 0; + for (auto &mode: pp.mode) { + UpdateUnsupportedModePre(pp, stream, mode, i); + } + preProcessNew.stream.push_back(stream); + } else if (it != preProcessNew.stream.end()) { + isDuplicate = 1; + } + return isDuplicate; +} + +static int32_t UpdateAvailableStreamPost(ProcessNew &postProcessNew, Postprocess &pp) +{ + int i; + int32_t isDuplicate = 0; + int32_t isSupported = UpdateUnsupportedScene(pp.stream); + auto it = std::find_if(postProcessNew.stream.begin(), postProcessNew.stream.end(), [&pp](const Stream& x) { + return x.scene == pp.stream; + }); + if ((it == postProcessNew.stream.end()) && (isSupported == 0)) { + Stream stream; + stream.scene = pp.stream; + i = 0; + for (auto &mode: pp.mode) { + UpdateUnsupportedModePost(pp, stream, mode, i); + } + postProcessNew.stream.push_back(stream); + } else if (it != postProcessNew.stream.end()) { + isDuplicate = 1; + } + return isDuplicate; +} + +void AudioEffectManager::UpdateEffectChains(std::vector &availableLayout) +{ + int count = 0; + std::vector deviceDelIdx; + for (auto &ec: supportedEffectConfig_.effectChains) { + for (auto &effectName: ec.apply) { + auto it = std::find_if(availableEffects_.begin(), availableEffects_.end(), + [&effectName](const Effect& effect) { + return effect.name == effectName; + }); + if (it == availableEffects_.end()) { + deviceDelIdx.emplace_back(count); + break; + } + } + count += 1; + } + for (auto it = deviceDelIdx.rbegin(); it != deviceDelIdx.rend(); ++it) { + supportedEffectConfig_.effectChains.erase(supportedEffectConfig_.effectChains.begin() + *it); + } + if (supportedEffectConfig_.effectChains.empty()) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG1]:effectChains-> all effectChains are unavailable"); + } + for (auto ec: supportedEffectConfig_.effectChains) { + availableLayout.emplace_back(ec.name); + } +} + +void AudioEffectManager::UpdateAvailableAEConfig(OriginalEffectConfig &aeConfig) +{ + int32_t isDuplicate = 0; + int32_t ret; + supportedEffectConfig_.effectChains = aeConfig.effectChains; + ProcessNew preProcessNew; + for (Preprocess &pp: aeConfig.preProcess) { + ret = UpdateAvailableStreamPre(preProcessNew, pp); + if (ret == 1) { + isDuplicate = 1; + } + } + ProcessNew postProcessNew; + for (Postprocess &pp: aeConfig.postProcess) { + ret = 
UpdateAvailableStreamPost(postProcessNew, pp); + if (ret == 1) { + isDuplicate = 1; + } + } + if (isDuplicate == 1) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG2]:stream-> The duplicate stream is deleted, \ + and the first configuration is retained!"); + } + supportedEffectConfig_.preProcessNew = preProcessNew; + supportedEffectConfig_.postProcessNew = postProcessNew; +} + +void AudioEffectManager::UpdateDuplicateBypassMode(ProcessNew &preProcessNew) +{ + int flag, count; + flag = 0; + std::vector deviceDelIdx; + for (auto &stream: preProcessNew.stream) { + count = 0; + deviceDelIdx.clear(); + for (auto &streamEffectMode: stream.streamEffectMode) { + if (streamEffectMode.mode == "EFFECT_NONE") { + deviceDelIdx.push_back(count); + } + count += 1; + } + for (auto it = deviceDelIdx.rbegin(); it != deviceDelIdx.rend(); ++it) { + stream.streamEffectMode[*it].devicePort = {}; + flag = -1; + } + } + if (flag == -1) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG3]:mode-> EFFECT_NONE can not configure by deveploer!"); + } +} + +void AudioEffectManager::UpdateDuplicateMode(ProcessNew &preProcessNew) +{ + std::unordered_set seen; + std::vector toRemove; + uint32_t i; + for (auto &stream: preProcessNew.stream) { + seen.clear(); + toRemove.clear(); + for (i = 0; i < stream.streamEffectMode.size(); i++) { + if (seen.count(stream.streamEffectMode[i].mode)) { + toRemove.push_back(i); + } else { + seen.insert(stream.streamEffectMode[i].mode); + } + } + for (auto it = toRemove.rbegin(); it != toRemove.rend(); ++it) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG4]:mode-> The duplicate mode of %{public}s configuration \ + is deleted, and the first configuration is retained!", stream.scene.c_str()); + stream.streamEffectMode.erase(stream.streamEffectMode.begin() + *it); + } + } +} + +static void UpdateDuplicateDeviceRecord(StreamEffectMode &streamEffectMode, Stream &stream) +{ + uint32_t i; + std::unordered_set seen; + std::vector toRemove; + seen.clear(); + toRemove.clear(); + for (i = 0; i < streamEffectMode.devicePort.size(); i++) { + if (seen.count(streamEffectMode.devicePort[i].type)) { + toRemove.push_back(i); + } else { + seen.insert(streamEffectMode.devicePort[i].type); + } + } + for (auto it = toRemove.rbegin(); it != toRemove.rend(); ++it) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG5]:device-> The duplicate device of %{public}s's %{public}s \ + mode configuration is deleted, and the first configuration is retained!", + stream.scene.c_str(), streamEffectMode.mode.c_str()); + streamEffectMode.devicePort.erase(streamEffectMode.devicePort.begin() + *it); + } +} + +void AudioEffectManager::UpdateDuplicateDevice(ProcessNew &preProcessNew) +{ + for (auto &stream: preProcessNew.stream) { + for (auto &streamEffectMode: stream.streamEffectMode) { + UpdateDuplicateDeviceRecord(streamEffectMode, stream); + } + } +} + +static int32_t UpdateUnavailableMode(std::vector &modeDelIdx, Stream &stream) +{ + int ret = 0; + for (auto it = modeDelIdx.rbegin(); it != modeDelIdx.rend(); ++it) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG7]:mode-> %{public}s's %{public}s mode is deleted!", + stream.scene.c_str(), stream.streamEffectMode[*it].mode.c_str()); + if (stream.streamEffectMode[*it].mode == "PLAYBACK_DEAFULT") { + ret = -1; + } + stream.streamEffectMode.erase(stream.streamEffectMode.begin() + *it); + if (stream.streamEffectMode.empty()) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG8]:mode-> %{public}s's mode is only EFFECT_NONE!", + stream.scene.c_str()); + StreamEffectMode streamEffectMode; + 
streamEffectMode.mode = "EFFECT_NONE"; + stream.streamEffectMode.push_back(streamEffectMode); + } + } + if (stream.streamEffectMode.empty()) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG8]:mode-> %{public}s's mode is only EFFECT_NONE!", + stream.scene.c_str()); + StreamEffectMode streamEffectMode; + streamEffectMode.mode = "EFFECT_NONE"; + stream.streamEffectMode.push_back(streamEffectMode); + } + return ret; +} + +static void UpdateUnavailableEffectChainsRecord(std::vector &availableLayout, Stream &stream, + StreamEffectMode &streamEffectMode, std::vector &modeDelIdx, int modeCount) +{ + std::vector deviceDelIdx; + deviceDelIdx.clear(); + int deviceCount = 0; + if (streamEffectMode.devicePort.empty()) { + modeDelIdx.push_back(modeCount); + } + for (auto &devicePort: streamEffectMode.devicePort) { + auto index = std::find(availableLayout.begin(), availableLayout.end(), devicePort.chain); + if (index == availableLayout.end()) { + deviceDelIdx.push_back(deviceCount); + } + deviceCount += 1; + } + if (streamEffectMode.devicePort.size() != deviceDelIdx.size() && deviceDelIdx.size() != 0) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG6]:device-> The unavailable effectChain \ + of %{public}s's %{public}s mode are set to LAYOUT_BYPASS!", + stream.scene.c_str(), streamEffectMode.mode.c_str()); + for (auto it = deviceDelIdx.rbegin(); it != deviceDelIdx.rend(); ++it) { + streamEffectMode.devicePort[*it].chain = "LAYOUT_BYPASS"; + } + } else { + for (auto it = deviceDelIdx.rbegin(); it != deviceDelIdx.rend(); ++it) { + streamEffectMode.devicePort.erase(streamEffectMode.devicePort.begin() + *it); + if (streamEffectMode.devicePort.empty()) { + modeDelIdx.push_back(modeCount); + } + } + } + modeCount += 1; +} + +int32_t AudioEffectManager::UpdateUnavailableEffectChains(std::vector &availableLayout, + ProcessNew &processNew) +{ + int modeCount, ret; + + std::vector modeDelIdx; + for (auto &stream: processNew.stream) { + modeDelIdx.clear(); + modeCount = 0; + for (auto &streamEffectMode: stream.streamEffectMode) { + UpdateUnavailableEffectChainsRecord(availableLayout, stream, streamEffectMode, modeDelIdx, modeCount); + } + ret = UpdateUnavailableMode(modeDelIdx, stream); + } + return ret; +} + +void AudioEffectManager::GetAvailableAEConfig() +{ + int32_t ret; + std::vector availableLayout; + existDefault_ = 1; + if (oriEffectConfig_.effectChains.size() == 0) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG12]: effectChains is none!"); + } + if (oriEffectConfig_.preProcess.size() == 0) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG11]: preProcess is none!"); + } + if (oriEffectConfig_.postProcess.size() == 0) { + AUDIO_INFO_LOG("[supportedEffectConfig LOG13]: postProcess is none!"); + } + + UpdateAvailableAEConfig(oriEffectConfig_); + UpdateEffectChains(availableLayout); + + UpdateDuplicateBypassMode(supportedEffectConfig_.preProcessNew); + UpdateDuplicateMode(supportedEffectConfig_.preProcessNew); + UpdateDuplicateDevice(supportedEffectConfig_.preProcessNew); + ret = UpdateUnavailableEffectChains(availableLayout, supportedEffectConfig_.preProcessNew); + if (ret != 0) { + existDefault_ = -1; + } + + UpdateDuplicateBypassMode(supportedEffectConfig_.postProcessNew); + UpdateDuplicateMode(supportedEffectConfig_.postProcessNew); + UpdateDuplicateDevice(supportedEffectConfig_.postProcessNew); + ret = UpdateUnavailableEffectChains(availableLayout, supportedEffectConfig_.postProcessNew); + if (ret != 0) { + existDefault_ = -1; + } +} + } // namespce AudioStandard } // namespace OHOS \ No newline at end of file 
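The audio effect manager changes above feed the client-facing APIs introduced earlier in this patch: AudioStreamManager::GetEffectInfoArray() exposes the per-scene modes that survive the filtering done in GetAvailableAEConfig(), and AudioRenderer::SetAudioEffectMode() applies one of them to a stream. A minimal caller sketch follows; the helper function, the GetInstance() accessor, the audio_errors.h include and the assumption that AudioSceneEffectInfo::mode holds AudioEffectMode values are illustrative and not part of this change.

// Illustrative caller (not part of this patch). Only GetEffectInfoArray(),
// AudioEffectMode and SetAudioEffectMode() come from this change; the
// GetInstance() accessor and SUCCESS are assumed from the existing API.
#include "audio_errors.h"
#include "audio_renderer.h"
#include "audio_stream_manager.h"

using namespace OHOS::AudioStandard;

static void ApplyDefaultEffectIfSupported(AudioRenderer &renderer)
{
    AudioStreamManager *manager = AudioStreamManager::GetInstance();
    if (manager == nullptr) {
        return;
    }
    AudioSceneEffectInfo effectInfo;
    // Ask which effect modes are configured for a music stream.
    int32_t ret = manager->GetEffectInfoArray(effectInfo, CONTENT_TYPE_MUSIC, STREAM_USAGE_MEDIA);
    if (ret != SUCCESS) {
        return;
    }
    for (AudioEffectMode mode : effectInfo.mode) {
        if (mode == EFFECT_DEFAULT) {
            // Route the renderer through the scene's default effect chain.
            renderer.SetAudioEffectMode(EFFECT_DEFAULT);
            return;
        }
    }
    // EFFECT_DEFAULT is not offered for this scene: fall back to bypassing effects.
    renderer.SetAudioEffectMode(EFFECT_NONE);
}

On the service side the selected mode is published as the scene.mode property of the PulseAudio stream (see the audio_service_client.cpp changes later in this diff).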
diff --git a/services/audio_policy/server/src/service/manager/audio_adapter_manager.cpp b/services/audio_policy/server/src/service/manager/audio_adapter_manager.cpp index 75f8f920260afedfe73724d5d92a2acf8fcd7cc8..ef543caa7c468f18c092439cc608cca23aa35e1c 100644 --- a/services/audio_policy/server/src/service/manager/audio_adapter_manager.cpp +++ b/services/audio_policy/server/src/service/manager/audio_adapter_manager.cpp @@ -531,6 +531,18 @@ std::string AudioAdapterManager::GetModuleArgs(const AudioModuleInfo &audioModul args = "file="; args.append(audioModuleInfo.fileName); } + } else if (audioModuleInfo.lib == MIXER_SINK) { + UpdateCommonArgs(audioModuleInfo, args); + if (!audioModuleInfo.name.empty()) { + args.append(" sink_name="); + args.append(audioModuleInfo.name); + } + } else if (audioModuleInfo.lib == EFFECT_SINK) { + UpdateCommonArgs(audioModuleInfo, args); + if (!audioModuleInfo.name.empty()) { + args.append(" sink_name="); + args.append(audioModuleInfo.name); + } } return args; } diff --git a/services/audio_service/BUILD.gn b/services/audio_service/BUILD.gn index 497a557f3ba0616e36d86275e03f4d34180e4a78..4d3cb10627a1ebf239780baa9b5f9972d27fcfd6 100644 --- a/services/audio_service/BUILD.gn +++ b/services/audio_service/BUILD.gn @@ -173,6 +173,7 @@ config("audio_service_config") { "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiocommon/include", "//foundation/multimedia/audio_framework/interfaces/inner_api/native/audiomanager/include", "//foundation/multimedia/audio_framework/frameworks/native/audiopolicy/include", + "//foundation/multimedia/audio_framework/frameworks/native/audioeffect/include", "${audio_fwk_path}/frameworks/native/audioutils/include", "${fwk_native_hdiadapter_path}/sink/bluetooth", "${fwk_native_hdiadapter_path}/sink/common", @@ -258,6 +259,7 @@ ohos_shared_library("audio_service") { "${fwk_native_hdiadapter_path}/sink:renderer_sink_adapter", "${fwk_native_hdiadapter_path}/source:audio_capturer_source", "${fwk_native_hdiadapter_path}/source:capturer_source_adapter", + "//foundation/multimedia/audio_framework/frameworks/native/audioeffect:audio_effect", ] external_deps = [ diff --git a/services/audio_service/client/include/audio_manager_base.h b/services/audio_service/client/include/audio_manager_base.h index 34c0a624c456241a2627666db5b6914ca5dd4595..b4646af91f044094a35d7905cfb60b269c192c27 100644 --- a/services/audio_service/client/include/audio_manager_base.h +++ b/services/audio_service/client/include/audio_manager_base.h @@ -21,6 +21,7 @@ #include "iremote_proxy.h" #include "iremote_stub.h" #include "audio_info.h" +#include "audio_effect.h" namespace OHOS { namespace AudioStandard { @@ -185,6 +186,9 @@ public: virtual bool LoadAudioEffectLibraries(std::vector libraries, std::vector effects, std::vector &successEffects) = 0; + virtual bool CreateEffectChainManager(std::vector effectChains) = 0; + + enum { GET_MAX_VOLUME = 0, GET_MIN_VOLUME = 1, @@ -207,6 +211,7 @@ public: SET_AUDIO_BALANCE_VALUE = 18, CREATE_AUDIOPROCESS = 19, LOAD_AUDIO_EFFECT_LIBRARIES = 20, + CREATE_AUDIO_EFFECT_CHAIN_MANAGER = 21, }; public: diff --git a/services/audio_service/client/include/audio_manager_proxy.h b/services/audio_service/client/include/audio_manager_proxy.h index c6a52e0a0998725fdcf6eda7501386f69eb42430..3ccb5e40bf154bd35be9380b43d1982a969e740a 100644 --- a/services/audio_service/client/include/audio_manager_proxy.h +++ b/services/audio_service/client/include/audio_manager_proxy.h @@ -49,6 +49,7 @@ public: sptr CreateAudioProcess(const 
AudioProcessConfig &config) override; bool LoadAudioEffectLibraries(const std::vector libraries, const std::vector effects, std::vector &successEffects) override; + bool CreateEffectChainManager(std::vector effectChains) override; private: static inline BrokerDelegator delegator_; }; diff --git a/services/audio_service/client/include/audio_service_client.h b/services/audio_service/client/include/audio_service_client.h index b2d0b81942e9a06129c6b3051227f40a91fefb70..f57a5584adabad7fe76965b1f59edf986d88e7dc 100644 --- a/services/audio_service/client/include/audio_service_client.h +++ b/services/audio_service/client/include/audio_service_client.h @@ -481,6 +481,22 @@ public: */ AudioRenderMode GetAudioRenderMode(); + /** + * @brief Gets the audio effect mode. + * + * @return Returns current audio effect mode. + */ + AudioEffectMode GetStreamAudioEffectMode(); + + /** + * @brief Sets the audio effect mode. + * + * * @param effectMode The audio effect mode at which the stream needs to be rendered. + * @return Returns {@link SUCCESS} if audio effect mode is successfully set; returns an error code + * defined in {@link audio_errors.h} otherwise. + */ + int32_t SetStreamAudioEffectMode(AudioEffectMode effectMode); + int32_t SetAudioCaptureMode(AudioCaptureMode captureMode); AudioCaptureMode GetAudioCaptureMode(); /** @@ -508,6 +524,14 @@ public: void SetClientID(int32_t clientPid, int32_t clientUid); + /** + * Gets the audio effect scene name + * + * @param audioType indicate the stream type like music, system, ringtone etc + * @return Returns the audio effect scene name. + */ + static const std::string GetEffectSceneName(AudioStreamType audioType); + protected: virtual void ProcessEvent(const AppExecFwk::InnerEvent::Pointer &event) override; void SendWriteBufferRequestEvent(); @@ -570,6 +594,8 @@ private: uint32_t rendererSampleRate; AudioRenderMode renderMode_; AudioCaptureMode captureMode_; + std::string effectSceneName = ""; + AudioEffectMode effectMode; std::shared_ptr readCallback_; std::shared_ptr writeCallback_; int64_t mWriteCbStamp = 0; // used to measure callback duration @@ -645,6 +671,7 @@ private: static const std::string GetStreamName(AudioStreamType audioType); static pa_sample_spec ConvertToPAAudioParams(AudioStreamParams audioParams); static AudioStreamParams ConvertFromPAAudioParams(pa_sample_spec paSampleSpec); + static const std::string GetEffectModeName(AudioEffectMode effectMode); static constexpr float MAX_STREAM_VOLUME_LEVEL = 1.0f; static constexpr float MIN_STREAM_VOLUME_LEVEL = 0.0f; diff --git a/services/audio_service/client/src/audio_manager_proxy.cpp b/services/audio_service/client/src/audio_manager_proxy.cpp index 01ea1ebadb75d7af7e3629e408ab4e6f72079d26..7d6f837a990419b1815998f35c9e4cf2e6fd4a5e 100644 --- a/services/audio_service/client/src/audio_manager_proxy.cpp +++ b/services/audio_service/client/src/audio_manager_proxy.cpp @@ -452,7 +452,6 @@ bool AudioManagerProxy::LoadAudioEffectLibraries(const vector libraries for (Effect x : effects) { dataParcel.WriteString(x.name); dataParcel.WriteString(x.libraryName); - dataParcel.WriteString(x.effectId); } error = Remote()->SendRequest(LOAD_AUDIO_EFFECT_LIBRARIES, dataParcel, replyParcel, option); @@ -470,11 +469,49 @@ bool AudioManagerProxy::LoadAudioEffectLibraries(const vector libraries for (i = 0; i < successEffSize; i++) { string effectName = replyParcel.ReadString(); string libName = replyParcel.ReadString(); - string effectId = replyParcel.ReadString(); - successEffects.push_back({effectName, libName, 
diff --git a/services/audio_service/client/src/audio_manager_proxy.cpp b/services/audio_service/client/src/audio_manager_proxy.cpp
index 01ea1ebadb75d7af7e3629e408ab4e6f72079d26..7d6f837a990419b1815998f35c9e4cf2e6fd4a5e 100644
--- a/services/audio_service/client/src/audio_manager_proxy.cpp
+++ b/services/audio_service/client/src/audio_manager_proxy.cpp
@@ -452,7 +452,6 @@ bool AudioManagerProxy::LoadAudioEffectLibraries(const vector<Library> libraries
     for (Effect x : effects) {
         dataParcel.WriteString(x.name);
         dataParcel.WriteString(x.libraryName);
-        dataParcel.WriteString(x.effectId);
     }

     error = Remote()->SendRequest(LOAD_AUDIO_EFFECT_LIBRARIES, dataParcel, replyParcel, option);
@@ -470,11 +469,49 @@ bool AudioManagerProxy::LoadAudioEffectLibraries(const vector<Library> libraries
     for (i = 0; i < successEffSize; i++) {
         string effectName = replyParcel.ReadString();
         string libName = replyParcel.ReadString();
-        string effectId = replyParcel.ReadString();
-        successEffects.push_back({effectName, libName, effectId});
+        successEffects.push_back({effectName, libName});
     }
     return true;
 }
+
+bool AudioManagerProxy::CreateEffectChainManager(std::vector<EffectChain> effectChains)
+{
+    int32_t error;
+
+    MessageParcel dataParcel, replyParcel;
+    MessageOption option;
+    if (!dataParcel.WriteInterfaceToken(GetDescriptor())) {
+        AUDIO_ERR_LOG("AudioManagerProxy: WriteInterfaceToken failed");
+        return false;
+    }
+
+    int32_t countEffectChains = effectChains.size();
+    std::vector<int32_t> listCountEffects;
+
+    for (EffectChain &effectChain : effectChains) {
+        listCountEffects.emplace_back(effectChain.apply.size());
+    }
+
+    dataParcel.WriteInt32(countEffectChains);
+    for (int32_t countEffects : listCountEffects) {
+        dataParcel.WriteInt32(countEffects);
+    }
+
+    for (EffectChain &effectChain : effectChains) {
+        dataParcel.WriteString(effectChain.name);
+        for (const std::string &applyName : effectChain.apply) {
+            dataParcel.WriteString(applyName);
+        }
+    }
+
+    error = Remote()->SendRequest(CREATE_AUDIO_EFFECT_CHAIN_MANAGER, dataParcel, replyParcel, option);
+    if (error != ERR_NONE) {
+        AUDIO_ERR_LOG("CreateEffectChainManager failed, error: %{public}d", error);
+        return false;
+    }
+    return true;
+}
+
 } // namespace AudioStandard
 } // namespace OHOS
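The write order above is: the number of chains, then one per-chain effect count, then each chain's name followed by its apply list. The stub-side handler for CREATE_AUDIO_EFFECT_CHAIN_MANAGER has to read the parcel in the same order; the following is an assumed mirror of that read sequence, not the verbatim stub code, with local names chosen for illustration.

    // Assumed deserialization mirroring AudioManagerProxy::CreateEffectChainManager().
    std::vector<EffectChain> effectChains;
    std::vector<int32_t> countEffect;
    int32_t countEffectChains = data.ReadInt32();
    for (int32_t i = 0; i < countEffectChains; i++) {
        countEffect.emplace_back(data.ReadInt32());              // per-chain effect counts
    }
    for (int32_t i = 0; i < countEffectChains; i++) {
        EffectChain effectChain;
        effectChain.name = data.ReadString();                    // chain name
        for (int32_t j = 0; j < countEffect[i]; j++) {
            effectChain.apply.emplace_back(data.ReadString());   // effects applied by this chain
        }
        effectChains.push_back(effectChain);
    }
    CreateEffectChainManager(effectChains);
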
diff --git a/services/audio_service/client/src/audio_service_client.cpp b/services/audio_service/client/src/audio_service_client.cpp
index 2cbce3e44d55282ecfafbdb399206aaa049bf7d0..b0f527312122c2b875efb57fdd97208b2cd56a9a 100644
--- a/services/audio_service/client/src/audio_service_client.cpp
+++ b/services/audio_service/client/src/audio_service_client.cpp
@@ -450,6 +450,8 @@ AudioServiceClient::AudioServiceClient()
     captureMode_ = CAPTURE_MODE_NORMAL;

     eAudioClientType = AUDIO_SERVICE_CLIENT_PLAYBACK;
+    effectSceneName = "SCENE_MUSIC";
+    effectMode = EFFECT_DEFAULT;

     mFrameSize = 0;
     mFrameMarkPosition = 0;
@@ -993,6 +995,11 @@ int32_t AudioServiceClient::CreateStream(AudioStreamParams audioParams, AudioStr
         if (SetStreamRenderRate(renderRate) != AUDIO_CLIENT_SUCCESS) {
             AUDIO_ERR_LOG("Set render rate failed");
         }
+
+        effectSceneName = GetEffectSceneName(audioType);
+        if (SetStreamAudioEffectMode(effectMode) != AUDIO_CLIENT_SUCCESS) {
+            AUDIO_ERR_LOG("Set audio effect mode failed");
+        }
     }

     state_ = PREPARED;
@@ -2004,6 +2011,7 @@ int32_t AudioServiceClient::SetStreamType(AudioStreamType audioStreamType)
     mStreamType = audioStreamType;

     const std::string streamName = GetStreamName(audioStreamType);
+    effectSceneName = GetEffectSceneName(audioStreamType);

     pa_proplist *propList = pa_proplist_new();
     if (propList == nullptr) {
@@ -2014,6 +2022,7 @@ int32_t AudioServiceClient::SetStreamType(AudioStreamType audioStreamType)

     pa_proplist_sets(propList, "stream.type", streamName.c_str());
     pa_proplist_sets(propList, "media.name", streamName.c_str());
+    pa_proplist_sets(propList, "scene.type", effectSceneName.c_str());
     pa_operation *updatePropOperation = pa_stream_proplist_update(paStream, PA_UPDATE_REPLACE, propList,
         nullptr, nullptr);
     pa_proplist_free(propList);
@@ -2719,5 +2728,87 @@ void AudioServiceClient::ProcessEvent(const AppExecFwk::InnerEvent::Pointer &eve
     }
 }

+const std::string AudioServiceClient::GetEffectSceneName(AudioStreamType audioType)
+{
+    std::string name;
+    switch (audioType) {
+        case STREAM_DEFAULT:
+            name = "SCENE_MUSIC";
+            break;
+        case STREAM_MUSIC:
+            name = "SCENE_MUSIC";
+            break;
+        case STREAM_MEDIA:
+            name = "SCENE_MOVIE";
+            break;
+        case STREAM_TTS:
+            name = "SCENE_SPEECH";
+            break;
+        case STREAM_RING:
+            name = "SCENE_RING";
+            break;
+        case STREAM_ALARM:
+            name = "SCENE_RING";
+            break;
+        default:
+            name = "SCENE_OTHERS";
+    }
+
+    const std::string sceneName = name;
+    return sceneName;
+}
+
+const std::string AudioServiceClient::GetEffectModeName(AudioEffectMode effectMode)
+{
+    std::string name;
+    switch (effectMode) {
+        case EFFECT_NONE:
+            name = "EFFECT_NONE";
+            break;
+        default:
+            name = "EFFECT_DEFAULT";
+    }
+
+    const std::string modeName = name;
+    return modeName;
+}
+
+AudioEffectMode AudioServiceClient::GetStreamAudioEffectMode()
+{
+    return effectMode;
+}
+
+int32_t AudioServiceClient::SetStreamAudioEffectMode(AudioEffectMode audioEffectMode)
+{
+    AUDIO_INFO_LOG("SetStreamAudioEffectMode: %{public}d", audioEffectMode);
+
+    if (CheckPaStatusIfinvalid(mainLoop, context, paStream, AUDIO_CLIENT_PA_ERR) < 0) {
+        AUDIO_ERR_LOG("set stream audio effect mode: invalid stream state");
+        return AUDIO_CLIENT_PA_ERR;
+    }
+
+    pa_threaded_mainloop_lock(mainLoop);
+
+    effectMode = audioEffectMode;
+    const std::string effectModeName = GetEffectModeName(audioEffectMode);
+
+    pa_proplist *propList = pa_proplist_new();
+    if (propList == nullptr) {
+        AUDIO_ERR_LOG("pa_proplist_new failed");
+        pa_threaded_mainloop_unlock(mainLoop);
+        return AUDIO_CLIENT_ERR;
+    }
+
+    pa_proplist_sets(propList, "scene.type", effectSceneName.c_str());
+    pa_proplist_sets(propList, "scene.mode", effectModeName.c_str());
+    pa_operation *updatePropOperation = pa_stream_proplist_update(paStream, PA_UPDATE_REPLACE, propList,
+        nullptr, nullptr);
+    pa_proplist_free(propList);
+    pa_operation_unref(updatePropOperation);
+
+    pa_threaded_mainloop_unlock(mainLoop);
+
+    return AUDIO_CLIENT_SUCCESS;
+}
 } // namespace AudioStandard
 } // namespace OHOS
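SetStreamAudioEffectMode() and SetStreamType() only publish the "scene.type" and "scene.mode" keys on the stream's PulseAudio proplist; whatever sink applies the effects is expected to read them back. A sketch of that consumer side using only public libpulse API; where the proplist comes from inside the server is left open and is an assumption, not something this patch defines.

    #include <cstring>
    #include <pulse/proplist.h>

    // Hypothetical consumer: decide whether to bypass effect processing for a stream,
    // based on the "scene.mode" value written by AudioServiceClient::SetStreamAudioEffectMode().
    bool IsEffectBypassed(pa_proplist *props)
    {
        const char *mode = pa_proplist_gets(props, "scene.mode");  // "EFFECT_NONE" / "EFFECT_DEFAULT"
        return mode != nullptr && std::strcmp(mode, "EFFECT_NONE") == 0;
    }
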
diff --git a/services/audio_service/client/src/audio_stream.cpp b/services/audio_service/client/src/audio_stream.cpp
index dd2950ff326fb13515d1a201661362bf11473a9d..c9e7a8628524d3079cf93e6017e7f2ef4c675fc6 100644
--- a/services/audio_service/client/src/audio_stream.cpp
+++ b/services/audio_service/client/src/audio_stream.cpp
@@ -969,5 +969,15 @@ void AudioStream::SubmitAllFreeBuffers()
         SendWriteBufferRequestEvent();
     }
 }
+
+int32_t AudioStream::SetAudioEffectMode(AudioEffectMode effectMode)
+{
+    return SetStreamAudioEffectMode(effectMode);
+}
+
+AudioEffectMode AudioStream::GetAudioEffectMode()
+{
+    return GetStreamAudioEffectMode();
+}
 } // namespace AudioStandard
 } // namespace OHOS
diff --git a/services/audio_service/client/src/audio_stream_manager.cpp b/services/audio_service/client/src/audio_stream_manager.cpp
index 63aac575ec0d20e96b91831c12b532076b1b8dcf..145a9fa872f2b8eff7987404090a2d54f68b16ae 100644
--- a/services/audio_service/client/src/audio_stream_manager.cpp
+++ b/services/audio_service/client/src/audio_stream_manager.cpp
@@ -17,6 +17,7 @@
 #include "audio_policy_manager.h"
 #include "audio_log.h"
 #include "audio_stream_manager.h"
+#include "audio_stream.h"

 namespace OHOS {
 namespace AudioStandard {
@@ -80,5 +81,42 @@ bool AudioStreamManager::IsAudioRendererLowLatencySupported(const AudioStreamInf
     AUDIO_DEBUG_LOG("IsAudioRendererLowLatencySupported");
     return AudioPolicyManager::GetInstance().IsAudioRendererLowLatencySupported(audioStreamInfo);
 }
+
+static void UpdateEffectInfoArray(SupportedEffectConfig &supportedEffectConfig,
+    AudioSceneEffectInfo &audioSceneEffectInfo, int i)
+{
+    uint32_t j;
+    AudioEffectMode audioEffectMode;
+    for (j = 0; j < supportedEffectConfig.postProcessNew.stream[i].streamEffectMode.size(); j++) {
+        audioEffectMode = effectModeMap.at(supportedEffectConfig.postProcessNew.stream[i].streamEffectMode[j].mode);
+        audioSceneEffectInfo.mode.push_back(audioEffectMode);
+    }
+    auto index = std::find(audioSceneEffectInfo.mode.begin(), audioSceneEffectInfo.mode.end(), EFFECT_NONE);
+    if (index == audioSceneEffectInfo.mode.end()) {
+        audioEffectMode = effectModeMap.at("EFFECT_NONE");
+        audioSceneEffectInfo.mode.push_back(audioEffectMode);
+    }
+    std::sort(audioSceneEffectInfo.mode.begin(), audioSceneEffectInfo.mode.end());
+}
+
+int32_t AudioStreamManager::GetEffectInfoArray(AudioSceneEffectInfo &audioSceneEffectInfo,
+    ContentType contentType, StreamUsage streamUsage)
+{
+    int i;
+    AudioStreamType streamType = AudioStream::GetStreamType(contentType, streamUsage);
+    std::string effectScene = AudioServiceClient::GetEffectSceneName(streamType);
+    SupportedEffectConfig supportedEffectConfig;
+    int ret = AudioPolicyManager::GetInstance().QueryEffectSceneMode(supportedEffectConfig);
+    int streamNum = supportedEffectConfig.postProcessNew.stream.size();
+    if (streamNum > 0) {
+        for (i = 0; i < streamNum; i++) {
+            if (effectScene == supportedEffectConfig.postProcessNew.stream[i].scene) {
+                UpdateEffectInfoArray(supportedEffectConfig, audioSceneEffectInfo, i);
+                break;
+            }
+        }
+    }
+    return ret;
+}
 } // namespace AudioStandard
 } // namespace OHOS
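A usage sketch for the new query, assuming the usual AudioStreamManager::GetInstance() singleton accessor; the content type and stream usage values are just examples, and the log line only prints the enumerators collected above.

    // Hypothetical caller: list the effect modes supported for a music stream.
    AudioSceneEffectInfo effectInfo;
    int32_t ret = AudioStreamManager::GetInstance()->GetEffectInfoArray(effectInfo,
        CONTENT_TYPE_MUSIC, STREAM_USAGE_MEDIA);
    if (ret == SUCCESS) {
        for (AudioEffectMode mode : effectInfo.mode) {
            AUDIO_INFO_LOG("supported audio effect mode: %{public}d", mode);
        }
    }
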
diff --git a/services/audio_service/server/include/audio_effect_server.h b/services/audio_service/server/include/audio_effect_server.h
index 61f56a53c84019e6415fbacddf5f45c8a2a9c76d..a3dafa2d846c662e71b88df079b52511732c61a1 100644
--- a/services/audio_service/server/include/audio_effect_server.h
+++ b/services/audio_service/server/include/audio_effect_server.h
@@ -20,80 +20,14 @@
 #include
 #include
 #include "audio_info.h"
+#include "audio_effect.h"

 #ifndef ST_AUDIO_EFFECT_SERVER_H
 #define ST_AUDIO_EFFECT_SERVER_H

-#define AUDIO_EFFECT_LIBRARY_INFO_SYM AELI
-#define AUDIO_EFFECT_LIBRARY_INFO_SYM_AS_STR "AELI"
-#define EFFECT_STRING_LEN_MAX 64
-
 namespace OHOS {
 namespace AudioStandard {

-typedef struct EffectInterfaceS **EffectHandleT;
-
-typedef struct AudioBufferS {
-    size_t frameCount; // number of frames in buffer
-    union {
-        void *raw; // raw pointer to start of buffer
-        float *f32; // pointer to float 32 bit data at start of buffer
-        int32_t *s32; // pointer to signed 32 bit data at start of buffer
-        int16_t *s16; // pointer to signed 16 bit data at start of buffer
-        uint8_t *u8; // pointer to unsigned 8 bit data at start of buffer
-    };
-} AudioBufferT;
-
-// for initial version
-typedef struct EffectDescriptorS {
-    std::string type;
-    std::string id;
-    uint32_t apiVersion;
-    uint32_t flags;
-    uint16_t cpuLoad;
-    uint16_t memoryUsage;
-    char name[EFFECT_STRING_LEN_MAX];
-    char implementor[EFFECT_STRING_LEN_MAX];
-} EffectDescriptorT;
-
-struct EffectInterfaceS {
-    int32_t (*Process)(EffectHandleT self, AudioBufferT *inBuffer, AudioBufferT *outBuffer);
-
-    int32_t (*Command)(EffectHandleT self, uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize,
-        void *pReplyData);
-
-    int32_t (*GetDescriptor)(EffectHandleT self, EffectDescriptorT *pDescriptor);
-
-    int32_t (*ProcessReverse)(EffectHandleT self, AudioBufferT *inBuffer, AudioBufferT *outBuffer);
-};
-
-// for initial version
-typedef struct AudioEffectLibraryS {
-    uint32_t tag;
-    uint32_t version;
-    const char *name;
-    const char *implementor;
-
-    int32_t (*CreateEffect)(const std::string *id, int32_t sessionId, int32_t ioId, EffectHandleT *pHandle);
-
-    int32_t (*ReleaseEffect)(EffectHandleT handle);
-
-    int32_t (*GetDescriptor)(const std::string *id, EffectDescriptorT *pDescriptor);
-} AudioEffectLibraryT;
-
-typedef struct ListNodeS {
-    void *object;
-    struct ListNodeS *next;
-} ListNodeT;
-
-typedef struct LibEntryS {
-    AudioEffectLibraryT *desc;
-    std::string name;
-    std::string path;
-    void *handle;
-    std::vector> effects;
-} LibEntryT;
-
 class AudioEffectServer {
 public:
     explicit AudioEffectServer();
diff --git a/services/audio_service/server/include/audio_server.h b/services/audio_service/server/include/audio_server.h
index b631c69a3e1594852b063089750910362e2ce917..a0c108a58b0984bcb47949897766819d28b7d557 100644
--- a/services/audio_service/server/include/audio_server.h
+++ b/services/audio_service/server/include/audio_server.h
@@ -45,6 +45,7 @@ public:
     bool LoadAudioEffectLibraries(std::vector<Library> libraries, std::vector<Effect> effects,
         std::vector<Effect>& successEffectList) override;
+    bool CreateEffectChainManager(std::vector<EffectChain> effectChains) override;
     int32_t SetMicrophoneMute(bool isMute) override;
     bool IsMicrophoneMute() override;
     int32_t SetVoiceVolume(float volume) override;
diff --git a/services/audio_service/server/src/audio_manager_stub.cpp b/services/audio_service/server/src/audio_manager_stub.cpp
index 3780d05c4260fe85b062f90dc199e1b39fbc8436..67cba39757b571f9859c7c23a8dbc809fb1f3ff0 100644
--- a/services/audio_service/server/src/audio_manager_stub.cpp
+++ b/services/audio_service/server/src/audio_manager_stub.cpp
@@ -35,8 +35,7 @@ static void LoadEffectLibrariesReadData(vector<Library>& libList, vector<Effect>
     for (i = 0; i < countEff; i++) {
         string effectName = data.ReadString();
         string libName = data.ReadString();
-        string effectId = data.ReadString();
-        effectList.push_back({effectName, libName, effectId});
+        effectList.push_back({effectName, libName});
     }
 }

@@ -46,7 +45,6 @@ static void LoadEffectLibrariesWriteReply(vector<Effect>& successEffectList, Mes
     for (Effect effect: successEffectList) {
         reply.WriteString(effect.name);
         reply.WriteString(effect.libraryName);
-        reply.WriteString(effect.effectId);
     }
 }

@@ -231,6 +229,32 @@ int AudioManagerStub::OnRemoteRequest(uint32_t code, MessageParcel &data, Messag
             }
             return AUDIO_OK;
         }
+
+        case CREATE_AUDIO_EFFECT_CHAIN_MANAGER: {
+            vector<EffectChain> effectChains = {};
+            vector<int32_t> countEffect = {};
+            int32_t countEffectChains = data.ReadInt32();
+            for (int i = 0; i
 libraries,
     return loadSuccess;
 }
+
+bool AudioServer::CreateEffectChainManager(std::vector<EffectChain> effectChains)
+{
+    int32_t audio_policy_server_id = 1041;
+    if (IPCSkeleton::GetCallingUid() != audio_policy_server_id) {
+        return false;
+    }
+
+    AUDIO_INFO_LOG("init audio effect chain manager in audio server");
+    AudioEffectChainManager *audioEffectChainManager = AudioEffectChainManager::GetInstance();
+    audioEffectChainManager->InitAudioEffectChain(effectChains, audioEffectServer_->GetAvailableEffects());
+    return true;
+}
+
 int32_t AudioServer::SetMicrophoneMute(bool isMute)
 {
     int32_t audio_policy_server_id = 1041;