From 6cbf6efc65188bf322a42fe473bd56941c5952df Mon Sep 17 00:00:00 2001 From: t00605578 Date: Sat, 16 Apr 2022 16:16:03 +0800 Subject: [PATCH 1/4] fix codex for pr29 Signed-off-by: t00605578 --- common/include/utils/dcamera_utils_tools.h | 1 + common/src/utils/dcamera_utils_tools.cpp | 10 + services/data_process/BUILD.gn | 16 +- .../fpscontroller/fps_controller_process.h | 2 +- .../{ => decoder}/decode_data_process.h | 27 +- .../decoder/decode_surface_listener.h | 46 ++++ .../{ => decoder}/decode_video_callback.h | 0 .../{ => encoder}/encode_data_process.h | 6 +- .../{ => encoder}/encode_video_callback.h | 0 .../fpscontroller/fps_controller_process.cpp | 2 +- .../{ => decoder}/decode_data_process.cpp | 151 ++++++----- .../decode_data_process_common.cpp | 247 +++++++----------- .../decoder/decode_surface_listener.cpp | 63 +++++ .../{ => decoder}/decode_video_callback.cpp | 0 .../{ => encoder}/encode_data_process.cpp | 60 ++++- .../encode_data_process_common.cpp | 61 +++-- .../{ => encoder}/encode_video_callback.cpp | 0 17 files changed, 407 insertions(+), 285 deletions(-) rename services/data_process/include/pipeline_node/multimedia_codec/{ => decoder}/decode_data_process.h (87%) create mode 100644 services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_surface_listener.h rename services/data_process/include/pipeline_node/multimedia_codec/{ => decoder}/decode_video_callback.h (100%) rename services/data_process/include/pipeline_node/multimedia_codec/{ => encoder}/encode_data_process.h (95%) rename services/data_process/include/pipeline_node/multimedia_codec/{ => encoder}/encode_video_callback.h (100%) rename services/data_process/src/pipeline_node/multimedia_codec/{ => decoder}/decode_data_process.cpp (91%) rename services/data_process/src/pipeline_node/multimedia_codec/{ => decoder}/decode_data_process_common.cpp (76%) create mode 100644 services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp rename 
services/data_process/src/pipeline_node/multimedia_codec/{ => decoder}/decode_video_callback.cpp (100%) rename services/data_process/src/pipeline_node/multimedia_codec/{ => encoder}/encode_data_process.cpp (92%) rename services/data_process/src/pipeline_node/multimedia_codec/{ => encoder}/encode_data_process_common.cpp (92%) rename services/data_process/src/pipeline_node/multimedia_codec/{ => encoder}/encode_video_callback.cpp (100%) diff --git a/common/include/utils/dcamera_utils_tools.h b/common/include/utils/dcamera_utils_tools.h index 13892032..bcd64cad 100644 --- a/common/include/utils/dcamera_utils_tools.h +++ b/common/include/utils/dcamera_utils_tools.h @@ -27,6 +27,7 @@ const std::string BASE_64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrs int32_t GetLocalDeviceNetworkId(std::string& networkId); int64_t GetNowTimeStampMs(); int64_t GetNowTimeStampUs(); +int32_t GetAlignedHeight(int32_t width); std::string Base64Encode(const unsigned char *toEncode, unsigned int len); std::string Base64Decode(const std::string& basicString); bool IsBase64(unsigned char c); diff --git a/common/src/utils/dcamera_utils_tools.cpp b/common/src/utils/dcamera_utils_tools.cpp index 65a701ee..ae03d8fb 100644 --- a/common/src/utils/dcamera_utils_tools.cpp +++ b/common/src/utils/dcamera_utils_tools.cpp @@ -57,6 +57,16 @@ int64_t GetNowTimeStampUs() return nowUs.count(); } +int32_t GetAlignedHeight(int32_t width) +{ + int32_t alignedBits = 32; + int32_t alignedHeight = width; + if (alignedHeight % alignedBits != 0) { + alignedHeight = ((alignedHeight / alignedBits) + 1) * alignedBits; + } + return alignedHeight; +} + std::string Base64Encode(const unsigned char *toEncode, unsigned int len) { std::string ret; diff --git a/services/data_process/BUILD.gn b/services/data_process/BUILD.gn index cc8dda17..13a1b4b7 100644 --- a/services/data_process/BUILD.gn +++ b/services/data_process/BUILD.gn @@ -36,7 +36,8 @@ ohos_shared_library("distributed_camera_data_process") { 
"include/eventbus", "include/pipeline", "include/utils", - "include/pipeline_node/multimedia_codec", + "include/pipeline_node/multimedia_codec/decoder", + "include/pipeline_node/multimedia_codec/encoder", "include/pipeline_node/colorspace_conversion", "include/pipeline_node/fpscontroller", "${common_path}/include/constants", @@ -50,20 +51,21 @@ ohos_shared_library("distributed_camera_data_process") { "src/pipeline/dcamera_pipeline_source.cpp", "src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp", "src/pipeline_node/fpscontroller/fps_controller_process.cpp", - "src/pipeline_node/multimedia_codec/decode_video_callback.cpp", - "src/pipeline_node/multimedia_codec/encode_video_callback.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_video_callback.cpp", "src/utils/image_common_type.cpp", ] if ("${product_name}" == "m40") { sources += [ - "src/pipeline_node/multimedia_codec/decode_data_process.cpp", - "src/pipeline_node/multimedia_codec/encode_data_process.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_data_process.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp", ] } else { sources += [ - "src/pipeline_node/multimedia_codec/decode_data_process_common.cpp", - "src/pipeline_node/multimedia_codec/encode_data_process_common.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp", ] } diff --git a/services/data_process/include/pipeline_node/fpscontroller/fps_controller_process.h b/services/data_process/include/pipeline_node/fpscontroller/fps_controller_process.h index 1dec54d6..9afbe082 100644 --- a/services/data_process/include/pipeline_node/fpscontroller/fps_controller_process.h +++ 
b/services/data_process/include/pipeline_node/fpscontroller/fps_controller_process.h @@ -43,7 +43,7 @@ private: float CalculateFrameRate(int64_t nowMs); bool IsDropFrame(float incomingFps); bool ReduceFrameRateByUniformStrategy(int32_t incomingFps); - int32_t FpsControllerDone(std::vector> outputBuffers); + int32_t FpsControllerDone(std::vector>& outputBuffers); private: const static uint32_t MAX_TARGET_FRAME_RATE = 30; diff --git a/services/data_process/include/pipeline_node/multimedia_codec/decode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h similarity index 87% rename from services/data_process/include/pipeline_node/multimedia_codec/decode_data_process.h rename to services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h index 2c1e95bc..a15186ca 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/decode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h @@ -24,6 +24,7 @@ #include #include "surface.h" +#include "ibuffer_consumer_listener.h" #include "media_errors.h" #include "avcodec_common.h" #include "format.h" @@ -78,26 +79,31 @@ private: int32_t InitDecoder(); int32_t InitDecoderMetadataFormat(); int32_t SetDecoderOutputSurface(); + int32_t StopVideoDecoder(); + void ReleaseVideoDecoder(); + void ReleaseDecoderSurface(); + void ReleaseCodecEvent(); int32_t FeedDecoderInputBuffer(); int64_t GetDecoderTimeStamp(); - int32_t GetAlignedHeight(); void CopyDecodedImage(const sptr& surBuf, int64_t timeStampUs, int32_t alignedWidth, int32_t alignedHeight); int32_t CopyYUVPlaneByRow(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); int32_t CheckCopyImageInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); bool IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo); void PostOutputDataBuffers(std::shared_ptr& outputBuffer); - int32_t DecodeDone(std::vector> 
outputBuffers); + int32_t DecodeDone(std::vector>& outputBuffers); private: const static int32_t VIDEO_DECODER_QUEUE_MAX = 1000; const static int32_t MAX_YUV420_BUFFER_SIZE = 1920 * 1080 * 3 / 2 * 2; + const static int32_t MAX_RGB32_BUFFER_SIZE = 1920 * 1080 * 4 * 2; const static uint32_t MAX_FRAME_RATE = 30; const static uint32_t MIN_VIDEO_WIDTH = 320; const static uint32_t MIN_VIDEO_HEIGHT = 240; const static uint32_t MAX_VIDEO_WIDTH = 1920; const static uint32_t MAX_VIDEO_HEIGHT = 1080; const static int32_t FIRST_FRAME_INPUT_NUM = 2; + const static int32_t RGB32_MEMORY_COEFFICIENT = 4; std::mutex mtxDecoderState_; std::mutex mtxHoldCount_; @@ -108,7 +114,7 @@ private: std::shared_ptr eventBusDecode_ = nullptr; std::shared_ptr eventBusRegHandleDecode_ = nullptr; std::shared_ptr eventBusRegHandlePipeline2Decode_ = nullptr; - std::shared_ptr videoDecoder_ = nullptr; + std::shared_ptr videoDecoder_ = nullptr; std::shared_ptr decodeVideoCallback_ = nullptr; sptr decodeConsumerSurface_ = nullptr; sptr decodeProducerSurface_ = nullptr; @@ -126,21 +132,6 @@ private: std::queue> inputBuffersQueue_; std::queue availableInputIndexsQueue_; }; - -class DecodeSurfaceListener : public IBufferConsumerListener { -public: - DecodeSurfaceListener(sptr surface, std::weak_ptr decodeVideoNode) - : surface_(surface), decodeVideoNode_(decodeVideoNode) {} - ~DecodeSurfaceListener(); - - void OnBufferAvailable() override; - void SetSurface(const sptr& surface); - void SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode); - -private: - sptr surface_; - std::weak_ptr decodeVideoNode_; -}; } // namespace DistributedHardware } // namespace OHOS #endif // OHOS_DECODE_DATA_PROCESS_H diff --git a/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_surface_listener.h b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_surface_listener.h new file mode 100644 index 00000000..04b011c0 --- /dev/null +++ 
b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_surface_listener.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_DECODE_SURFACE_LISTENER_H +#define OHOS_DECODE_SURFACE_LISTENER_H + +#include "surface.h" +#include "ibuffer_consumer_listener.h" + +#include "decode_data_process.h" + +namespace OHOS { +namespace DistributedHardware { +class DecodeDataProcess; + +class DecodeSurfaceListener : public IBufferConsumerListener { +public: + DecodeSurfaceListener(sptr surface, std::weak_ptr decodeVideoNode) + : surface_(surface), decodeVideoNode_(decodeVideoNode) {} + ~DecodeSurfaceListener(); + + void OnBufferAvailable() override; + void SetSurface(const sptr& surface); + void SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode); + sptr GetSurface() const; + std::shared_ptr GetDecodeVideoNode() const; + +private: + sptr surface_; + std::weak_ptr decodeVideoNode_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DECODE_SURFACE_LISTENER_H diff --git a/services/data_process/include/pipeline_node/multimedia_codec/decode_video_callback.h b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_video_callback.h similarity index 100% rename from services/data_process/include/pipeline_node/multimedia_codec/decode_video_callback.h rename to 
services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_video_callback.h diff --git a/services/data_process/include/pipeline_node/multimedia_codec/encode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h similarity index 95% rename from services/data_process/include/pipeline_node/multimedia_codec/encode_data_process.h rename to services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h index 8d87fc5e..6bcac38f 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/encode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h @@ -63,11 +63,13 @@ private: int32_t InitEncoder(); int32_t InitEncoderMetadataFormat(); int32_t InitEncoderBitrateFormat(); + int32_t StopVideoEncoder(); + void ReleaseVideoEncoder(); int32_t FeedEncoderInputBuffer(std::shared_ptr& inputBuffer); sptr GetEncoderInputSurfaceBuffer(); int64_t GetEncoderTimeStamp(); int32_t GetEncoderOutputBuffer(uint32_t index, Media::AVCodecBufferInfo info); - int32_t EncodeDone(std::vector> outputBuffers); + int32_t EncodeDone(std::vector>& outputBuffers); private: const static int32_t ENCODER_STRIDE_ALIGNMENT = 8; @@ -107,7 +109,7 @@ private: VideoConfigParams sourceConfig_; VideoConfigParams targetConfig_; std::weak_ptr callbackPipelineSink_; - std::shared_ptr videoEncoder_ = nullptr; + std::shared_ptr videoEncoder_ = nullptr; std::shared_ptr encodeVideoCallback_ = nullptr; sptr encodeProducerSurface_ = nullptr; diff --git a/services/data_process/include/pipeline_node/multimedia_codec/encode_video_callback.h b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_video_callback.h similarity index 100% rename from services/data_process/include/pipeline_node/multimedia_codec/encode_video_callback.h rename to services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_video_callback.h diff 
--git a/services/data_process/src/pipeline_node/fpscontroller/fps_controller_process.cpp b/services/data_process/src/pipeline_node/fpscontroller/fps_controller_process.cpp index daadcf5f..1e9b3562 100644 --- a/services/data_process/src/pipeline_node/fpscontroller/fps_controller_process.cpp +++ b/services/data_process/src/pipeline_node/fpscontroller/fps_controller_process.cpp @@ -303,7 +303,7 @@ bool FpsControllerProcess::ReduceFrameRateByUniformStrategy(int32_t incomingFrmR return isDrop; } -int32_t FpsControllerProcess::FpsControllerDone(std::vector> outputBuffers) +int32_t FpsControllerProcess::FpsControllerDone(std::vector>& outputBuffers) { if (outputBuffers.empty()) { DHLOGE("The received data buffers is empty."); diff --git a/services/data_process/src/pipeline_node/multimedia_codec/decode_data_process.cpp b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process.cpp similarity index 91% rename from services/data_process/src/pipeline_node/multimedia_codec/decode_data_process.cpp rename to services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process.cpp index da8a0af7..ab263856 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/decode_data_process.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process.cpp @@ -20,6 +20,7 @@ #include "convert_nv12_to_nv21.h" #include "dcamera_utils_tools.h" +#include "decode_surface_listener.h" #include "decode_video_callback.h" namespace OHOS { @@ -57,21 +58,11 @@ int32_t DecodeDataProcess::InitNode() ReleaseProcessNode(); return err; } - alignedHeight_ = GetAlignedHeight(); + alignedHeight_ = GetAlignedHeight(static_cast(sourceConfig_.GetHeight())); isDecoderProcess_ = true; return DCAMERA_OK; } -int32_t DecodeDataProcess::GetAlignedHeight() -{ - int32_t alignedBits = 32; - int32_t alignedHeight = static_cast(sourceConfig_.GetHeight()); - if (alignedHeight % alignedBits != 0) { - alignedHeight = ((alignedHeight / 
alignedBits) + 1) * alignedBits; - } - return alignedHeight; -} - bool DecodeDataProcess::IsInDecoderRange(const VideoConfigParams& curConfig) { return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH || curConfig.GetWidth() <= MAX_VIDEO_WIDTH || @@ -158,8 +149,8 @@ int32_t DecodeDataProcess::InitDecoderMetadataFormat() } metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::NV12); metadataFormat_.PutIntValue("max_input_size", MAX_YUV420_BUFFER_SIZE); - metadataFormat_.PutIntValue("width", (int32_t)sourceConfig_.GetWidth()); - metadataFormat_.PutIntValue("height", (int32_t)sourceConfig_.GetHeight()); + metadataFormat_.PutIntValue("width", static_cast(sourceConfig_.GetWidth())); + metadataFormat_.PutIntValue("height", static_cast(sourceConfig_.GetHeight())); metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); return DCAMERA_OK; } @@ -206,43 +197,94 @@ int32_t DecodeDataProcess::SetDecoderOutputSurface() return DCAMERA_OK; } -void DecodeDataProcess::ReleaseProcessNode() +int32_t DecodeDataProcess::StopVideoDecoder() { - DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); - isDecoderProcess_ = false; - if (nextDataProcess_ != nullptr) { - nextDataProcess_->ReleaseProcessNode(); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before StopVideoDecoder."); + return DCAMERA_BAD_VALUE; + } + + bool isSuccess = true; + int32_t ret = videoDecoder_->Flush(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder flush failed. Error type: %d.", ret); + isSuccess = isSuccess && false; + } + ret = videoDecoder_->Stop(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder stop failed. 
Error type: %d.", ret); + isSuccess = isSuccess && false; + } + if (!isSuccess) { + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +void DecodeDataProcess::ReleaseVideoDecoder() +{ + std::lock_guard lck(mtxDecoderState_); + DHLOGD("Start release videoDecoder."); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before ReleaseVideoDecoder."); + decodeVideoCallback_ = nullptr; + return; + } + int32_t ret = StopVideoDecoder(); + if (ret != DCAMERA_OK) { + DHLOGE("StopVideoDecoder failed."); + } + ret = videoDecoder_->Release(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder release failed. Error type: %d.", ret); + } + videoDecoder_ = nullptr; + decodeVideoCallback_ = nullptr; +} + +void DecodeDataProcess::ReleaseDecoderSurface() +{ + if (decodeConsumerSurface_ == nullptr) { + decodeProducerSurface_ = nullptr; + DHLOGE("The decode consumer surface does not exist before UnregisterConsumerListener."); + return; + } + int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Unregister consumer listener failed. 
Error type: %d.", ret); } - if (eventBusDecode_ != nullptr && eventBusPipeline_ != nullptr) { - DHLOGD("Start release DecodeNode eventBusDecode_ and eventBusPipeline_."); - DCameraCodecEvent codecEvent(*this, std::make_shared()); + decodeConsumerSurface_ = nullptr; + decodeProducerSurface_ = nullptr; +} + +void DecodeDataProcess::ReleaseCodecEvent() +{ + DCameraCodecEvent codecEvent(*this, std::make_shared()); + if (eventBusDecode_ != nullptr) { eventBusDecode_->RemoveHandler(codecEvent.GetType(), eventBusRegHandleDecode_); + eventBusRegHandleDecode_ = nullptr; eventBusDecode_ = nullptr; + } + if (eventBusPipeline_ != nullptr) { eventBusPipeline_->RemoveHandler(codecEvent.GetType(), eventBusRegHandlePipeline2Decode_); + eventBusRegHandlePipeline2Decode_ = nullptr; eventBusPipeline_ = nullptr; } + DHLOGD("Release DecodeNode eventBusDecode and eventBusPipeline end."); +} - { - std::lock_guard lck(mtxDecoderState_); - if (videoDecoder_ != nullptr) { - DHLOGD("Start release videoDecoder."); - videoDecoder_->Flush(); - videoDecoder_->Stop(); - videoDecoder_->Release(); - videoDecoder_ = nullptr; - decodeVideoCallback_ = nullptr; - } - } - if (decodeConsumerSurface_ != nullptr) { - int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); - if (ret != SURFACE_ERROR_OK) { - DHLOGE("Unregister consumer listener failed. 
Error type: %d.", ret); - } - decodeConsumerSurface_ = nullptr; - decodeProducerSurface_ = nullptr; - decodeSurfaceListener_ = nullptr; +void DecodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); + isDecoderProcess_ = false; + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); } + ReleaseCodecEvent(); + ReleaseVideoDecoder(); + ReleaseDecoderSurface(); + processType_ = ""; std::queue> emptyBuffersQueue; inputBuffersQueue_.swap(emptyBuffersQueue); @@ -555,7 +597,7 @@ void DecodeDataProcess::PostOutputDataBuffers(std::shared_ptr& outpu DHLOGD("Send video decoder output asynchronous DCameraCodecEvents success."); } -int32_t DecodeDataProcess::DecodeDone(std::vector> outputBuffers) +int32_t DecodeDataProcess::DecodeDone(std::vector>& outputBuffers) { DHLOGD("Decoder Done."); if (outputBuffers.empty()) { @@ -681,32 +723,5 @@ VideoConfigParams DecodeDataProcess::GetTargetConfig() const { return targetConfig_; } - -void DecodeSurfaceListener::OnBufferAvailable() -{ - DHLOGD("DecodeSurfaceListener : OnBufferAvailable."); - std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); - if (targetDecoderNode == nullptr) { - DHLOGE("decodeVideoNode_ is nullptr."); - return; - } - targetDecoderNode->GetDecoderOutputBuffer(surface_); -} - -void DecodeSurfaceListener::SetSurface(const sptr& surface) -{ - surface_ = surface; -} - -void DecodeSurfaceListener::SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode) -{ - decodeVideoNode_ = decodeVideoNode; -} - -DecodeSurfaceListener::~DecodeSurfaceListener() -{ - DHLOGD("DecodeSurfaceListener : ~DecodeSurfaceListener."); - surface_ = nullptr; -} } // namespace DistributedHardware } // namespace OHOS diff --git a/services/data_process/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp similarity index 76% rename from 
services/data_process/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp rename to services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp index 77de7980..8ec1b045 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp @@ -20,6 +20,7 @@ #include "convert_nv12_to_nv21.h" #include "dcamera_utils_tools.h" +#include "decode_surface_listener.h" #include "decode_video_callback.h" namespace OHOS { @@ -57,21 +58,11 @@ int32_t DecodeDataProcess::InitNode() ReleaseProcessNode(); return err; } - alignedHeight_ = GetAlignedHeight(); + alignedHeight_ = GetAlignedHeight(static_cast(sourceConfig_.GetHeight())); isDecoderProcess_ = true; return DCAMERA_OK; } -int32_t DecodeDataProcess::GetAlignedHeight() -{ - int32_t alignedBits = 32; - int32_t alignedHeight = static_cast(sourceConfig_.GetHeight()); - if (alignedHeight % alignedBits != 0) { - alignedHeight = ((alignedHeight / alignedBits) + 1) * alignedBits; - } - return alignedHeight; -} - bool DecodeDataProcess::IsInDecoderRange(const VideoConfigParams& curConfig) { return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH || curConfig.GetWidth() <= MAX_VIDEO_WIDTH || @@ -145,13 +136,11 @@ int32_t DecodeDataProcess::InitDecoderMetadataFormat() DHLOGD("Common Init video decoder metadata format."); processType_ = "video/mp4v-es"; metadataFormat_.PutStringValue("codec_mime", processType_); - - int32_t width = (int32_t)sourceConfig_.GetWidth(); - int32_t height = (int32_t)sourceConfig_.GetHeight(); + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::RGBA); - metadataFormat_.PutIntValue("max_input_size", width * height * 4 * 2); - metadataFormat_.PutIntValue("width", width); - metadataFormat_.PutIntValue("height", height); + metadataFormat_.PutIntValue("max_input_size", MAX_RGB32_BUFFER_SIZE); + 
metadataFormat_.PutIntValue("width", static_cast(sourceConfig_.GetWidth())); + metadataFormat_.PutIntValue("height", static_cast(sourceConfig_.GetHeight())); metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); return DCAMERA_OK; } @@ -198,43 +187,94 @@ int32_t DecodeDataProcess::SetDecoderOutputSurface() return DCAMERA_OK; } -void DecodeDataProcess::ReleaseProcessNode() +int32_t DecodeDataProcess::StopVideoDecoder() { - DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); - isDecoderProcess_ = false; - if (nextDataProcess_ != nullptr) { - nextDataProcess_->ReleaseProcessNode(); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before StopVideoDecoder."); + return DCAMERA_BAD_VALUE; } - if (eventBusDecode_ != nullptr && eventBusPipeline_ != nullptr) { - DHLOGD("Start release DecodeNode eventBusDecode_ and eventBusPipeline_."); - DCameraCodecEvent codecEvent(*this, std::make_shared()); + + bool isSuccess = true; + int32_t ret = videoDecoder_->Flush(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder flush failed. Error type: %d.", ret); + isSuccess = isSuccess && false; + } + ret = videoDecoder_->Stop(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder stop failed. Error type: %d.", ret); + isSuccess = isSuccess && false; + } + if (!isSuccess) { + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +void DecodeDataProcess::ReleaseVideoDecoder() +{ + std::lock_guard lck(mtxDecoderState_); + DHLOGD("Start release videoDecoder."); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before ReleaseVideoDecoder."); + decodeVideoCallback_ = nullptr; + return; + } + int32_t ret = StopVideoDecoder(); + if (ret != DCAMERA_OK) { + DHLOGE("StopVideoDecoder failed."); + } + ret = videoDecoder_->Release(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoDecoder release failed. 
Error type: %d.", ret); + } + videoDecoder_ = nullptr; + decodeVideoCallback_ = nullptr; +} + +void DecodeDataProcess::ReleaseDecoderSurface() +{ + if (decodeConsumerSurface_ == nullptr) { + decodeProducerSurface_ = nullptr; + DHLOGE("The decode consumer surface does not exist before UnregisterConsumerListener."); + return; + } + int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Unregister consumer listener failed. Error type: %d.", ret); + } + decodeConsumerSurface_ = nullptr; + decodeProducerSurface_ = nullptr; +} + +void DecodeDataProcess::ReleaseCodecEvent() +{ + DCameraCodecEvent codecEvent(*this, std::make_shared()); + if (eventBusDecode_ != nullptr) { eventBusDecode_->RemoveHandler(codecEvent.GetType(), eventBusRegHandleDecode_); + eventBusRegHandleDecode_ = nullptr; eventBusDecode_ = nullptr; + } + if (eventBusPipeline_ != nullptr) { eventBusPipeline_->RemoveHandler(codecEvent.GetType(), eventBusRegHandlePipeline2Decode_); + eventBusRegHandlePipeline2Decode_ = nullptr; eventBusPipeline_ = nullptr; } + DHLOGD("Release DecodeNode eventBusDecode and eventBusPipeline end."); +} - { - std::lock_guard lck(mtxDecoderState_); - if (videoDecoder_ != nullptr) { - DHLOGD("Start release videoDecoder."); - videoDecoder_->Flush(); - videoDecoder_->Stop(); - videoDecoder_->Release(); - videoDecoder_ = nullptr; - decodeVideoCallback_ = nullptr; - } - } - if (decodeConsumerSurface_ != nullptr) { - int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); - if (ret != SURFACE_ERROR_OK) { - DHLOGE("Unregister consumer listener failed. 
Error type: %d.", ret); - } - decodeConsumerSurface_ = nullptr; - decodeProducerSurface_ = nullptr; - decodeSurfaceListener_ = nullptr; +void DecodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); + isDecoderProcess_ = false; + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); } + ReleaseCodecEvent(); + ReleaseVideoDecoder(); + ReleaseDecoderSurface(); + processType_ = ""; std::queue> emptyBuffersQueue; inputBuffersQueue_.swap(emptyBuffersQueue); @@ -268,8 +308,7 @@ int32_t DecodeDataProcess::ProcessData(std::vector>& DHLOGE("video decoder input buffers queue over flow."); return DCAMERA_INDEX_OVERFLOW; } - int32_t bufferSize = 1920 * 1808 * 4 * 2; - if (inputBuffers[0]->Size() > bufferSize) { + if (inputBuffers[0]->Size() > MAX_RGB32_BUFFER_SIZE) { DHLOGE("DecodeNode input buffer size %d error.", inputBuffers[0]->Size()); return DCAMERA_MEMORY_OPT_ERROR; } @@ -405,7 +444,8 @@ void DecodeDataProcess::CopyDecodedImage(const sptr& surBuf, int6 DHLOGE("surface buffer is null!"); return; } - size_t validDecodedImageSize = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight() * 4); + size_t validDecodedImageSize = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight() * + RGB32_MEMORY_COEFFICIENT); size_t surfaceBufSize = static_cast(surBuf->GetSize()); if (validDecodedImageSize > surfaceBufSize) { DHLOGE("Buffer size error, validDecodedImageSize %d, surBufSize %d.", @@ -428,90 +468,6 @@ void DecodeDataProcess::CopyDecodedImage(const sptr& surBuf, int6 PostOutputDataBuffers(bufferOutput); } -int32_t DecodeDataProcess::CopyYUVPlaneByRow(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) -{ - int32_t ret = CheckCopyImageInfo(srcImgInfo, dstImgInfo); - if (ret != DCAMERA_OK) { - DHLOGE("Check CopyImageUnitInfo failed."); - return ret; - } - errno_t err = EOK; - int32_t srcDataOffset = 0; - int32_t dstDataOffset = 0; - for (int32_t yh = 0; yh < dstImgInfo.height; 
yh++) { - err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.chromaOffset - dstDataOffset, - srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); - if (err != EOK) { - DHLOGE("memcpy_s YPlane in line[%d] failed.", yh); - return DCAMERA_MEMORY_OPT_ERROR; - } - dstDataOffset += dstImgInfo.alignedWidth; - srcDataOffset += srcImgInfo.alignedWidth; - } - DHLOGD("Copy Yplane end, dstDataOffset %d, srcDataOffset %d, validYPlaneSize %d.", - dstDataOffset, srcDataOffset, dstImgInfo.chromaOffset); - - int32_t y2UvRatio = 2; - dstDataOffset = dstImgInfo.chromaOffset; - srcDataOffset = srcImgInfo.chromaOffset; - for (int32_t uvh = 0; uvh < dstImgInfo.height / y2UvRatio; uvh++) { - err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.imgSize - dstDataOffset, - srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); - if (err != EOK) { - DHLOGE("memcpy_s UVPlane in line[%d] failed.", uvh); - return DCAMERA_MEMORY_OPT_ERROR; - } - dstDataOffset += dstImgInfo.alignedWidth; - srcDataOffset += srcImgInfo.alignedWidth; - } - DHLOGD("Copy UVplane end, dstDataOffset %d, srcDataOffset %d.", dstDataOffset, srcDataOffset); - return DCAMERA_OK; -} - -int32_t DecodeDataProcess::CheckCopyImageInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) -{ - if (srcImgInfo.imgData == nullptr || dstImgInfo.imgData == nullptr) { - DHLOGE("The imgData of srcImgInfo or the imgData of dstImgInfo are null!"); - return DCAMERA_BAD_VALUE; - } - if (srcImgInfo.colorFormat != dstImgInfo.colorFormat) { - DHLOGE("CopyInfo error : srcImgInfo colorFormat %d, dstImgInfo colorFormat %d.", - srcImgInfo.colorFormat, dstImgInfo.colorFormat); - return DCAMERA_BAD_VALUE; - } - - if (!IsCorrectImageUnitInfo(srcImgInfo)) { - DHLOGE("srcImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + - "imgSize %lld.", srcImgInfo.width, srcImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight, - srcImgInfo.chromaOffset, srcImgInfo.imgSize); - 
return DCAMERA_BAD_VALUE; - } - if (!IsCorrectImageUnitInfo(dstImgInfo)) { - DHLOGE("dstImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + - "imgSize %lld.", dstImgInfo.width, dstImgInfo.height, dstImgInfo.alignedWidth, dstImgInfo.alignedHeight, - dstImgInfo.chromaOffset, dstImgInfo.imgSize); - return DCAMERA_BAD_VALUE; - } - - if (dstImgInfo.width > srcImgInfo.alignedWidth || dstImgInfo.height > srcImgInfo.alignedHeight) { - DHLOGE("Comparison ImgInfo fail: dstwidth %d, dstheight %d, srcAlignedWidth %d, srcAlignedHeight %d.", - dstImgInfo.width, dstImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight); - return DCAMERA_BAD_VALUE; - } - return DCAMERA_OK; -} - -bool DecodeDataProcess::IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo) -{ - int32_t y2UvRatio = 2; - int32_t bytesPerPixel = 3; - size_t expectedImgSize = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight * - bytesPerPixel / y2UvRatio); - size_t expectedChromaOffset = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight); - return (imgInfo.width <= imgInfo.alignedWidth && imgInfo.height <= imgInfo.alignedHeight && - imgInfo.imgSize >= expectedImgSize && imgInfo.chromaOffset == expectedChromaOffset); -} - void DecodeDataProcess::PostOutputDataBuffers(std::shared_ptr& outputBuffer) { if (eventBusDecode_ == nullptr || outputBuffer == nullptr) { @@ -527,7 +483,7 @@ void DecodeDataProcess::PostOutputDataBuffers(std::shared_ptr& outpu DHLOGD("Send video decoder output asynchronous DCameraCodecEvents success."); } -int32_t DecodeDataProcess::DecodeDone(std::vector> outputBuffers) +int32_t DecodeDataProcess::DecodeDone(std::vector>& outputBuffers) { DHLOGD("Decoder Done."); if (outputBuffers.empty()) { @@ -644,32 +600,5 @@ VideoConfigParams DecodeDataProcess::GetTargetConfig() const { return targetConfig_; } - -void DecodeSurfaceListener::OnBufferAvailable() -{ - DHLOGD("DecodeSurfaceListener : OnBufferAvailable."); - std::shared_ptr 
targetDecoderNode = decodeVideoNode_.lock(); - if (targetDecoderNode == nullptr) { - DHLOGE("decodeVideoNode_ is nullptr."); - return; - } - targetDecoderNode->GetDecoderOutputBuffer(surface_); -} - -void DecodeSurfaceListener::SetSurface(const sptr& surface) -{ - surface_ = surface; -} - -void DecodeSurfaceListener::SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode) -{ - decodeVideoNode_ = decodeVideoNode; -} - -DecodeSurfaceListener::~DecodeSurfaceListener() -{ - DHLOGD("DecodeSurfaceListener : ~DecodeSurfaceListener."); - surface_ = nullptr; -} } // namespace DistributedHardware } // namespace OHOS diff --git a/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp new file mode 100644 index 00000000..8707452e --- /dev/null +++ b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "decode_surface_listener.h" + +#include "distributed_hardware_log.h" + +namespace OHOS { +namespace DistributedHardware { +DecodeSurfaceListener::~DecodeSurfaceListener() +{ + DHLOGD("DecodeSurfaceListener : ~DecodeSurfaceListener."); + surface_ = nullptr; +} + +void DecodeSurfaceListener::OnBufferAvailable() +{ + DHLOGD("DecodeSurfaceListener : OnBufferAvailable."); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->GetDecoderOutputBuffer(surface_); +} + +void DecodeSurfaceListener::SetSurface(const sptr& surface) +{ + surface_ = surface; +} + +void DecodeSurfaceListener::SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode) +{ + decodeVideoNode_ = decodeVideoNode; +} + +sptr DecodeSurfaceListener::GetSurface() const +{ + return surface_; +} + +std::shared_ptr DecodeSurfaceListener::GetDecodeVideoNode() const +{ + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + } + return targetDecoderNode; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process/src/pipeline_node/multimedia_codec/decode_video_callback.cpp b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp similarity index 100% rename from services/data_process/src/pipeline_node/multimedia_codec/decode_video_callback.cpp rename to services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encode_data_process.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp similarity index 92% rename from services/data_process/src/pipeline_node/multimedia_codec/encode_data_process.cpp rename to 
services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp index 834b0390..de751469 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/encode_data_process.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp @@ -214,6 +214,49 @@ int32_t EncodeDataProcess::InitEncoderBitrateFormat() return DCAMERA_OK; } +int32_t EncodeDataProcess::StopVideoEncoder() +{ + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before StopVideoEncoder."); + return DCAMERA_BAD_VALUE; + } + int32_t ret = videoEncoder_->Flush(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder flush failed. Error type: %d.", ret); + return DCAMERA_BAD_OPERATE; + } + ret = videoEncoder_->Stop(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder stop failed. Error type: %d.", ret); + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +void EncodeDataProcess::ReleaseVideoEncoder() +{ + std::lock_guard lck(mtxEncoderState_); + DHLOGD("Start release videoEncoder."); + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before ReleaseVideoEncoder."); + encodeProducerSurface_ = nullptr; + encodeVideoCallback_ = nullptr; + return; + } + + int32_t ret = StopVideoEncoder(); + if (ret != DCAMERA_OK) { + DHLOGE("StopVideoEncoder failed."); + } + ret = videoEncoder_->Release(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder release failed. 
Error type: %d.", ret); + } + encodeProducerSurface_ = nullptr; + videoEncoder_ = nullptr; + encodeVideoCallback_ = nullptr; +} + void EncodeDataProcess::ReleaseProcessNode() { DHLOGD("Start release [%d] node : EncodeNode.", nodeRank_); @@ -222,18 +265,7 @@ void EncodeDataProcess::ReleaseProcessNode() nextDataProcess_->ReleaseProcessNode(); } - { - std::lock_guard lck(mtxEncoderState_); - if (videoEncoder_ != nullptr) { - DHLOGD("Start release videoEncoder."); - videoEncoder_->Flush(); - videoEncoder_->Stop(); - videoEncoder_->Release(); - encodeProducerSurface_ = nullptr; - videoEncoder_ = nullptr; - encodeVideoCallback_ = nullptr; - } - } + ReleaseVideoEncoder(); waitEncoderOutputCount_ = 0; lastFeedEncoderInputBufferTimeUs_ = 0; @@ -311,7 +343,7 @@ int32_t EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& i } inputTimeStampUs_ = GetEncoderTimeStamp(); DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); - surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); + surfacebuffer->ExtraSet("timeStamp", inputTimeStampUs_); BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; SurfaceError ret = encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); if (ret != SURFACE_ERROR_OK) { @@ -400,7 +432,7 @@ int32_t EncodeDataProcess::GetEncoderOutputBuffer(uint32_t index, Media::AVCodec return EncodeDone(nextInputBuffers); } -int32_t EncodeDataProcess::EncodeDone(std::vector> outputBuffers) +int32_t EncodeDataProcess::EncodeDone(std::vector>& outputBuffers) { DHLOGD("Encoder done."); if (outputBuffers.empty()) { diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp similarity index 92% rename from services/data_process/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp rename to 
services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp index 000e5c8b..a08ed0e9 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp @@ -192,6 +192,49 @@ int32_t EncodeDataProcess::InitEncoderBitrateFormat() return DCAMERA_OK; } +int32_t EncodeDataProcess::StopVideoEncoder() +{ + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before StopVideoEncoder."); + return DCAMERA_BAD_VALUE; + } + int32_t ret = videoEncoder_->Flush(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder flush failed. Error type: %d.", ret); + return DCAMERA_BAD_OPERATE; + } + ret = videoEncoder_->Stop(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder stop failed. Error type: %d.", ret); + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +void EncodeDataProcess::ReleaseVideoEncoder() +{ + std::lock_guard lck(mtxEncoderState_); + DHLOGD("Start release videoEncoder."); + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before ReleaseVideoEncoder."); + encodeProducerSurface_ = nullptr; + encodeVideoCallback_ = nullptr; + return; + } + + int32_t ret = StopVideoEncoder(); + if (ret != DCAMERA_OK) { + DHLOGE("StopVideoEncoder failed."); + } + ret = videoEncoder_->Release(); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("VideoEncoder release failed. 
Error type: %d.", ret); + } + encodeProducerSurface_ = nullptr; + videoEncoder_ = nullptr; + encodeVideoCallback_ = nullptr; +} + void EncodeDataProcess::ReleaseProcessNode() { DHLOGD("Start release [%d] node : EncodeNode.", nodeRank_); @@ -200,18 +243,7 @@ void EncodeDataProcess::ReleaseProcessNode() nextDataProcess_->ReleaseProcessNode(); } - { - std::lock_guard lck(mtxEncoderState_); - if (videoEncoder_ != nullptr) { - DHLOGD("Start release videoEncoder."); - videoEncoder_->Flush(); - videoEncoder_->Stop(); - videoEncoder_->Release(); - encodeProducerSurface_ = nullptr; - videoEncoder_ = nullptr; - encodeVideoCallback_ = nullptr; - } - } + ReleaseVideoEncoder(); waitEncoderOutputCount_ = 0; lastFeedEncoderInputBufferTimeUs_ = 0; @@ -291,8 +323,7 @@ int32_t EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& i } inputTimeStampUs_ = GetEncoderTimeStamp(); DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); - - surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); + surfacebuffer->ExtraSet("timeStamp", inputTimeStampUs_); BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; SurfaceError ret = encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); if (ret != SURFACE_ERROR_OK) { @@ -361,7 +392,7 @@ int32_t EncodeDataProcess::GetEncoderOutputBuffer(uint32_t index, Media::AVCodec return EncodeDone(nextInputBuffers); } -int32_t EncodeDataProcess::EncodeDone(std::vector> outputBuffers) +int32_t EncodeDataProcess::EncodeDone(std::vector>& outputBuffers) { DHLOGD("Encoder done."); if (outputBuffers.empty()) { diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encode_video_callback.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_video_callback.cpp similarity index 100% rename from services/data_process/src/pipeline_node/multimedia_codec/encode_video_callback.cpp rename to 
services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_video_callback.cpp -- Gitee From 5c144d3dc171c40096423b61dd68292c8bef3aeb Mon Sep 17 00:00:00 2001 From: t00605578 Date: Sat, 16 Apr 2022 16:26:37 +0800 Subject: [PATCH 2/4] fix codex for pr29-1 Signed-off-by: t00605578 --- .../multimedia_codec/encoder/encode_data_process.h | 1 + .../encoder/encode_data_process_common.cpp | 10 ++++------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h index 6bcac38f..7180197c 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h @@ -74,6 +74,7 @@ private: private: const static int32_t ENCODER_STRIDE_ALIGNMENT = 8; const static int64_t NORM_YUV420_BUFFER_SIZE = 1920 * 1080 * 3 / 2; + const static int32_t NORM_RGB32_BUFFER_SIZE = 1920 * 1080 * 4; const static uint32_t MAX_FRAME_RATE = 30; const static uint32_t MIN_VIDEO_WIDTH = 320; const static uint32_t MIN_VIDEO_HEIGHT = 240; diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp index a08ed0e9..79aa0260 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp @@ -148,12 +148,10 @@ int32_t EncodeDataProcess::InitEncoderMetadataFormat() metadataFormat_.PutStringValue("codec_mime", processType_); metadataFormat_.PutIntValue("codec_profile", Media::MPEG4Profile::MPEG4_PROFILE_ADVANCED_CODING); - int32_t width = (int32_t)sourceConfig_.GetWidth(); - int32_t height = 
(int32_t)sourceConfig_.GetHeight(); metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::RGBA); - metadataFormat_.PutLongValue("max_input_size", width * height * 4); - metadataFormat_.PutIntValue("width", width); - metadataFormat_.PutIntValue("height", height); + metadataFormat_.PutLongValue("max_input_size", NORM_RGB32_BUFFER_SIZE); + metadataFormat_.PutIntValue("width", static_cast(sourceConfig_.GetWidth())); + metadataFormat_.PutIntValue("height", static_cast(sourceConfig_.GetHeight())); metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); return DCAMERA_OK; } @@ -270,7 +268,7 @@ int32_t EncodeDataProcess::ProcessData(std::vector>& return DCAMERA_INIT_ERR; } size_t bufferSize = 1920 * 1808 * 4; - if (inputBuffers[0]->Size() > bufferSize) { + if (inputBuffers[0]->Size() > NORM_RGB32_BUFFER_SIZE) { DHLOGE("EncodeNode input buffer size %d error.", inputBuffers[0]->Size()); return DCAMERA_MEMORY_OPT_ERROR; } -- Gitee From 15a15e429bea2532dee217abd813eb94e4ff3f02 Mon Sep 17 00:00:00 2001 From: t00605578 Date: Sat, 16 Apr 2022 17:52:27 +0800 Subject: [PATCH 3/4] fix codex for pr29-2 Signed-off-by: t00605578 --- services/data_process/BUILD.gn | 2 +- .../decoder/decode_data_process.h | 24 +++++++++---------- .../encoder/encode_data_process.h | 16 ++++++------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/services/data_process/BUILD.gn b/services/data_process/BUILD.gn index 13a1b4b7..74c3409b 100644 --- a/services/data_process/BUILD.gn +++ b/services/data_process/BUILD.gn @@ -51,8 +51,8 @@ ohos_shared_library("distributed_camera_data_process") { "src/pipeline/dcamera_pipeline_source.cpp", "src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp", "src/pipeline_node/fpscontroller/fps_controller_process.cpp", - "src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp", "src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp", + 
"src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp", "src/pipeline_node/multimedia_codec/encoder/encode_video_callback.cpp", "src/utils/image_common_type.cpp", ] diff --git a/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h index a15186ca..f53b04a2 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h @@ -16,32 +16,32 @@ #ifndef OHOS_DECODE_DATA_PROCESS_H #define OHOS_DECODE_DATA_PROCESS_H -#include "securec.h" +#include #include -#include #include #include -#include +#include -#include "surface.h" -#include "ibuffer_consumer_listener.h" -#include "media_errors.h" #include "avcodec_common.h" -#include "format.h" -#include "avsharedmemory.h" #include "avcodec_video_decoder.h" +#include "avsharedmemory.h" #include "event.h" #include "event_bus.h" #include "event_sender.h" -#include "eventbus_handler.h" #include "event_registration.h" +#include "eventbus_handler.h" +#include "format.h" +#include "ibuffer_consumer_listener.h" +#include "media_errors.h" +#include "securec.h" +#include "surface.h" +#include "abstract_data_process.h" #include "data_buffer.h" -#include "distributed_camera_errno.h" -#include "image_common_type.h" #include "dcamera_codec_event.h" -#include "abstract_data_process.h" #include "dcamera_pipeline_source.h" +#include "distributed_camera_errno.h" +#include "image_common_type.h" namespace OHOS { namespace DistributedHardware { diff --git a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h index 7180197c..fd7e375b 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h +++ 
b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h @@ -16,23 +16,23 @@ #ifndef OHOS_ENCODE_DATA_PROCESS_H #define OHOS_ENCODE_DATA_PROCESS_H -#include "securec.h" #include -#include #include +#include -#include "surface.h" -#include "media_errors.h" #include "avcodec_common.h" -#include "format.h" -#include "avsharedmemory.h" #include "avcodec_video_encoder.h" +#include "avsharedmemory.h" +#include "format.h" +#include "media_errors.h" +#include "securec.h" +#include "surface.h" +#include "abstract_data_process.h" #include "data_buffer.h" +#include "dcamera_pipeline_sink.h" #include "distributed_camera_errno.h" #include "image_common_type.h" -#include "abstract_data_process.h" -#include "dcamera_pipeline_sink.h" namespace OHOS { namespace DistributedHardware { -- Gitee From 0e6a0e0b2b04f02a11937799ab20d1decb98e490 Mon Sep 17 00:00:00 2001 From: t00605578 Date: Sat, 16 Apr 2022 18:50:12 +0800 Subject: [PATCH 4/4] fix codex for pr29-3 Signed-off-by: t00605578 --- .../decoder/decode_data_process.h | 2 +- .../encoder/encode_data_process.h | 2 +- .../decoder/decode_data_process_common.cpp | 3 +- .../encoder/encode_data_process.cpp | 2 +- .../encoder/encode_data_process_common.cpp | 4 +- services/data_process_yuan/BUILD.gn | 94 +++ .../include/eventbus/dcamera_codec_event.h | 90 +++ .../include/eventbus/dcamera_pipeline_event.h | 101 +++ .../interfaces/data_process_listener.h | 38 + .../interfaces/idata_process_pipeline.h | 40 + .../include/pipeline/abstract_data_process.h | 44 ++ .../include/pipeline/dcamera_pipeline_sink.h | 72 ++ .../pipeline/dcamera_pipeline_source.h | 76 ++ .../convert_nv12_to_nv21.h | 48 ++ .../fpscontroller/fps_controller_process.h | 79 ++ .../multimedia_codec/decode_data_process.h | 146 ++++ .../multimedia_codec/decode_video_callback.h | 45 ++ .../multimedia_codec/encode_data_process.h | 124 +++ .../multimedia_codec/encode_video_callback.h | 45 ++ .../include/utils/image_common_type.h | 79 ++ 
.../src/pipeline/abstract_data_process.cpp | 37 + .../src/pipeline/dcamera_pipeline_sink.cpp | 176 +++++ .../src/pipeline/dcamera_pipeline_source.cpp | 209 +++++ .../convert_nv12_to_nv21.cpp | 364 +++++++++ .../fpscontroller/fps_controller_process.cpp | 331 ++++++++ .../multimedia_codec/decode_data_process.cpp | 712 ++++++++++++++++++ .../decode_data_process_common.cpp | 675 +++++++++++++++++ .../decode_video_callback.cpp | 67 ++ .../multimedia_codec/encode_data_process.cpp | 498 ++++++++++++ .../encode_data_process_common.cpp | 459 +++++++++++ .../encode_video_callback.cpp | 66 ++ .../src/utils/image_common_type.cpp | 66 ++ 32 files changed, 4788 insertions(+), 6 deletions(-) create mode 100644 services/data_process_yuan/BUILD.gn create mode 100644 services/data_process_yuan/include/eventbus/dcamera_codec_event.h create mode 100644 services/data_process_yuan/include/eventbus/dcamera_pipeline_event.h create mode 100644 services/data_process_yuan/include/interfaces/data_process_listener.h create mode 100644 services/data_process_yuan/include/interfaces/idata_process_pipeline.h create mode 100644 services/data_process_yuan/include/pipeline/abstract_data_process.h create mode 100644 services/data_process_yuan/include/pipeline/dcamera_pipeline_sink.h create mode 100644 services/data_process_yuan/include/pipeline/dcamera_pipeline_source.h create mode 100644 services/data_process_yuan/include/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.h create mode 100644 services/data_process_yuan/include/pipeline_node/fpscontroller/fps_controller_process.h create mode 100644 services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_data_process.h create mode 100644 services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_video_callback.h create mode 100644 services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_data_process.h create mode 100644 
services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_video_callback.h create mode 100644 services/data_process_yuan/include/utils/image_common_type.h create mode 100644 services/data_process_yuan/src/pipeline/abstract_data_process.cpp create mode 100644 services/data_process_yuan/src/pipeline/dcamera_pipeline_sink.cpp create mode 100644 services/data_process_yuan/src/pipeline/dcamera_pipeline_source.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/fpscontroller/fps_controller_process.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_video_callback.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp create mode 100644 services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_video_callback.cpp create mode 100644 services/data_process_yuan/src/utils/image_common_type.cpp diff --git a/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h index f53b04a2..1e96fedc 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/decoder/decode_data_process.h @@ -114,7 +114,7 @@ private: std::shared_ptr eventBusDecode_ = nullptr; std::shared_ptr eventBusRegHandleDecode_ = nullptr; std::shared_ptr eventBusRegHandlePipeline2Decode_ = nullptr; - std::shared_ptr 
videoDecoder_ = nullptr; + std::shared_ptr videoDecoder_ = nullptr; std::shared_ptr decodeVideoCallback_ = nullptr; sptr decodeConsumerSurface_ = nullptr; sptr decodeProducerSurface_ = nullptr; diff --git a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h index fd7e375b..d77dede6 100644 --- a/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h +++ b/services/data_process/include/pipeline_node/multimedia_codec/encoder/encode_data_process.h @@ -110,7 +110,7 @@ private: VideoConfigParams sourceConfig_; VideoConfigParams targetConfig_; std::weak_ptr callbackPipelineSink_; - std::shared_ptr videoEncoder_ = nullptr; + std::shared_ptr videoEncoder_ = nullptr; std::shared_ptr encodeVideoCallback_ = nullptr; sptr encodeProducerSurface_ = nullptr; diff --git a/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp index 8ec1b045..f8b6225b 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp @@ -521,7 +521,8 @@ void DecodeDataProcess::OnEvent(DCameraCodecEvent& ev) OnError(); return; } - DecodeDone(receivedCodecPacket->GetDataBuffers()); + std::vector> rgbDataBuffers = receivedCodecPacket->GetDataBuffers(); + DecodeDone(rgbDataBuffers); break; } case VideoCodecAction::ACTION_ONCE_AGAIN: diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp index de751469..a72eb3e7 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp +++ 
b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp @@ -343,7 +343,7 @@ int32_t EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& i } inputTimeStampUs_ = GetEncoderTimeStamp(); DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); - surfacebuffer->ExtraSet("timeStamp", inputTimeStampUs_); + surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; SurfaceError ret = encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); if (ret != SURFACE_ERROR_OK) { diff --git a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp index 79aa0260..003f8218 100644 --- a/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp +++ b/services/data_process/src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp @@ -267,7 +267,7 @@ int32_t EncodeDataProcess::ProcessData(std::vector>& DHLOGE("The video encoder does not exist before encoding data."); return DCAMERA_INIT_ERR; } - size_t bufferSize = 1920 * 1808 * 4; + if (inputBuffers[0]->Size() > NORM_RGB32_BUFFER_SIZE) { DHLOGE("EncodeNode input buffer size %d error.", inputBuffers[0]->Size()); return DCAMERA_MEMORY_OPT_ERROR; @@ -321,7 +321,7 @@ int32_t EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& i } inputTimeStampUs_ = GetEncoderTimeStamp(); DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); - surfacebuffer->ExtraSet("timeStamp", inputTimeStampUs_); + surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; SurfaceError ret = 
encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); if (ret != SURFACE_ERROR_OK) { diff --git a/services/data_process_yuan/BUILD.gn b/services/data_process_yuan/BUILD.gn new file mode 100644 index 00000000..74c3409b --- /dev/null +++ b/services/data_process_yuan/BUILD.gn @@ -0,0 +1,94 @@ +# Copyright (c) 2021 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/ohos.gni") +import("//build/ohos_var.gni") +import( + "//foundation/distributedhardware/distributedcamera/distributedcamera.gni") + +ohos_shared_library("distributed_camera_data_process") { + include_dirs = [ + "//utils/native/base/include", + "//utils/system/safwk/native/include", + "//foundation/graphic/standard/interfaces/innerkits/common", + "//foundation/graphic/standard/interfaces/innerkits/surface", + "//drivers/peripheral/display/interfaces/include", + "//foundation/multimedia/media_standard/interfaces/innerkits/native/media/include", + "${fwk_common_path}/log/include", + "${fwk_common_path}/utils/include", + "${fwk_utils_path}/include/log", + "${fwk_utils_path}/include/eventbus", + "${fwk_utils_path}/include", + ] + + include_dirs += [ + "include/interfaces", + "include/eventbus", + "include/pipeline", + "include/utils", + "include/pipeline_node/multimedia_codec/decoder", + "include/pipeline_node/multimedia_codec/encoder", + "include/pipeline_node/colorspace_conversion", + "include/pipeline_node/fpscontroller", + "${common_path}/include/constants", + 
"${common_path}/include/utils", + "${innerkits_path}/native_cpp/camera_source/include", + ] + + sources = [ + "src/pipeline/abstract_data_process.cpp", + "src/pipeline/dcamera_pipeline_sink.cpp", + "src/pipeline/dcamera_pipeline_source.cpp", + "src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp", + "src/pipeline_node/fpscontroller/fps_controller_process.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_surface_listener.cpp", + "src/pipeline_node/multimedia_codec/decoder/decode_video_callback.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_video_callback.cpp", + "src/utils/image_common_type.cpp", + ] + + if ("${product_name}" == "m40") { + sources += [ + "src/pipeline_node/multimedia_codec/decoder/decode_data_process.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_data_process.cpp", + ] + } else { + sources += [ + "src/pipeline_node/multimedia_codec/decoder/decode_data_process_common.cpp", + "src/pipeline_node/multimedia_codec/encoder/encode_data_process_common.cpp", + ] + } + + deps = [ + "${common_path}:distributed_camera_utils", + "${fwk_utils_path}:distributedhardwareutils", + "//foundation/graphic/standard/frameworks/surface:surface", + "//utils/native/base:utils", + ] + + defines = [ + "HI_LOG_ENABLE", + "DH_LOG_TAG=\"dcameradataproc\"", + "LOG_DOMAIN=0xD004100", + ] + + external_deps = [ + "eventhandler:libeventhandler", + "hiviewdfx_hilog_native:libhilog", + "multimedia_media_standard:media_client", + ] + + subsystem_name = "distributedhardware" + + part_name = "distributed_camera" +} diff --git a/services/data_process_yuan/include/eventbus/dcamera_codec_event.h b/services/data_process_yuan/include/eventbus/dcamera_codec_event.h new file mode 100644 index 00000000..91a77be9 --- /dev/null +++ b/services/data_process_yuan/include/eventbus/dcamera_codec_event.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_DCAMERA_CODEC_EVENT_H +#define OHOS_DCAMERA_CODEC_EVENT_H + +#include + +#include "event.h" +#include "data_buffer.h" +#include "image_common_type.h" + +namespace OHOS { +namespace DistributedHardware { +enum class VideoCodecAction : int32_t { + NO_ACTION = 0, + ACTION_ONCE_AGAIN = 1, +}; + +class CodecPacket { +public: + CodecPacket() : videoCodec_(VideoCodecType::NO_CODEC) {} + CodecPacket(VideoCodecType videoCodec, const std::vector>& multiDataBuffers) + : videoCodec_(videoCodec), multiDataBuffers_(multiDataBuffers) {} + ~CodecPacket() = default; + + void SetVideoCodecType(VideoCodecType videoCodec) + { + videoCodec_ = videoCodec; + } + + VideoCodecType GetVideoCodecType() const + { + return videoCodec_; + } + + void SetDataBuffers(std::vector>& multiDataBuffers) + { + multiDataBuffers_ = multiDataBuffers; + } + + std::vector> GetDataBuffers() const + { + return multiDataBuffers_; + } + +private: + VideoCodecType videoCodec_; + std::vector> multiDataBuffers_; +}; + +class DCameraCodecEvent : public Event { + TYPEINDENT(DCameraCodecEvent) +public: + DCameraCodecEvent(EventSender& sender, const std::shared_ptr& codecPacket) + : Event(sender), codecPacket_(codecPacket), action_(VideoCodecAction::NO_ACTION) {} + DCameraCodecEvent(EventSender& sender, const std::shared_ptr& codecPacket, + VideoCodecAction otherAction) + : Event(sender), codecPacket_(codecPacket), action_(otherAction) {} + 
~DCameraCodecEvent() {} + + std::shared_ptr GetCodecPacket() const + { + return codecPacket_; + } + + VideoCodecAction GetAction() const + { + return action_; + } + +private: + std::shared_ptr codecPacket_; + VideoCodecAction action_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DCAMERA_CODEC_EVENT_H diff --git a/services/data_process_yuan/include/eventbus/dcamera_pipeline_event.h b/services/data_process_yuan/include/eventbus/dcamera_pipeline_event.h new file mode 100644 index 00000000..5c2534cb --- /dev/null +++ b/services/data_process_yuan/include/eventbus/dcamera_pipeline_event.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DCAMERA_PIPELINE_EVENT_H +#define OHOS_DCAMERA_PIPELINE_EVENT_H + +#include + +#include "event.h" +#include "data_buffer.h" +#include "image_common_type.h" + +namespace OHOS { +namespace DistributedHardware { +enum class PipelineAction : int32_t { + NO_ACTION = 0, +}; + +class PipelineConfig { +public: + PipelineConfig() : pipelineType_(PipelineType::VIDEO) {} + PipelineConfig(PipelineType pipelineType, const std::string& pipelineOwner, + const std::vector>& multiDataBuffers) + : pipelineType_(pipelineType), pipelineOwner_(pipelineOwner), multiDataBuffers_(multiDataBuffers) {} + ~PipelineConfig() = default; + + void SetPipelineType(PipelineType pipelineType) + { + pipelineType_ = pipelineType; + } + + PipelineType GetPipelineType() const + { + return pipelineType_; + } + + void SetPipelineOwner(std::string pipelineOwner) + { + pipelineOwner_ = pipelineOwner; + } + + std::string GetPipelineOwner() const + { + return pipelineOwner_; + } + + void SetDataBuffers(std::vector>& multiDataBuffers) + { + multiDataBuffers_ = multiDataBuffers; + } + + std::vector> GetDataBuffers() const + { + return multiDataBuffers_; + } + +private: + PipelineType pipelineType_; + std::string pipelineOwner_; + std::vector> multiDataBuffers_; +}; + +class DCameraPipelineEvent : public Event { + TYPEINDENT(DCameraPipelineEvent) +public: + DCameraPipelineEvent(EventSender& sender, const std::shared_ptr& pipelineConfig) + : Event(sender), pipelineConfig_(pipelineConfig), action_(PipelineAction::NO_ACTION) {} + DCameraPipelineEvent(EventSender& sender, const std::shared_ptr& pipelineConfig, + PipelineAction otherAction) + : Event(sender), pipelineConfig_(pipelineConfig), action_(otherAction) {} + ~DCameraPipelineEvent() = default; + + std::shared_ptr GetPipelineConfig() const + { + return pipelineConfig_; + } + + PipelineAction GetAction() const + { + return action_; + } + +private: + std::shared_ptr pipelineConfig_ = nullptr; + PipelineAction action_; +}; +} // namespace 
DistributedHardware +} // namespace OHOS +#endif // OHOS_DCAMERA_PIPELINE_EVENT_H diff --git a/services/data_process_yuan/include/interfaces/data_process_listener.h b/services/data_process_yuan/include/interfaces/data_process_listener.h new file mode 100644 index 00000000..ce79eb8e --- /dev/null +++ b/services/data_process_yuan/include/interfaces/data_process_listener.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DATA_PROCESS_LISTENER_H +#define OHOS_DATA_PROCESS_LISTENER_H + +#include "data_buffer.h" + +namespace OHOS { +namespace DistributedHardware { +enum DataProcessErrorType : int32_t { + ERROR_PIPELINE_ENCODER = 0, + ERROR_PIPELINE_DECODER = -1, + ERROR_PIPELINE_EVENTBUS = -2, + ERROR_DISABLE_PROCESS = -3, +}; + +class DataProcessListener { +public: + virtual ~DataProcessListener() = default; + virtual void OnProcessedVideoBuffer(const std::shared_ptr& videoResult) = 0; + virtual void OnError(DataProcessErrorType errorType) = 0; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DATA_PROCESS_LISTENER_H diff --git a/services/data_process_yuan/include/interfaces/idata_process_pipeline.h b/services/data_process_yuan/include/interfaces/idata_process_pipeline.h new file mode 100644 index 00000000..053c1af6 --- /dev/null +++ b/services/data_process_yuan/include/interfaces/idata_process_pipeline.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_IDATA_PROCESS_PIPELINE_H +#define OHOS_IDATA_PROCESS_PIPELINE_H + +#include +#include + +#include "data_buffer.h" +#include "image_common_type.h" +#include "distributed_camera_errno.h" +#include "data_process_listener.h" + +namespace OHOS { +namespace DistributedHardware { +class IDataProcessPipeline { +public: + virtual ~IDataProcessPipeline() = default; + + virtual int32_t CreateDataProcessPipeline(PipelineType piplineType, const VideoConfigParams& sourceConfig, + const VideoConfigParams& targetConfig, const std::shared_ptr& listener) = 0; + virtual int32_t ProcessData(std::vector>& dataBuffers) = 0; + virtual void DestroyDataProcessPipeline() = 0; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_IDATA_PROCESS_PIPELINE_H diff --git a/services/data_process_yuan/include/pipeline/abstract_data_process.h b/services/data_process_yuan/include/pipeline/abstract_data_process.h new file mode 100644 index 00000000..7f18737c --- /dev/null +++ b/services/data_process_yuan/include/pipeline/abstract_data_process.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_ABSTRACT_DATA_PROCESS_H +#define OHOS_ABSTRACT_DATA_PROCESS_H + +#include +#include + +#include "data_buffer.h" +#include "image_common_type.h" +#include "distributed_camera_errno.h" + +namespace OHOS { +namespace DistributedHardware { +class AbstractDataProcess { +public: + virtual ~AbstractDataProcess() = default; + int32_t SetNextNode(std::shared_ptr& nextDataProcess); + void SetNodeRank(size_t curNodeRank); + + virtual int32_t InitNode() = 0; + virtual int32_t ProcessData(std::vector>& inputBuffers) = 0; + virtual void ReleaseProcessNode() = 0; + +protected: + std::shared_ptr nextDataProcess_ = nullptr; + size_t nodeRank_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_ABSTRACT_DATA_PROCESS_H diff --git a/services/data_process_yuan/include/pipeline/dcamera_pipeline_sink.h b/services/data_process_yuan/include/pipeline/dcamera_pipeline_sink.h new file mode 100644 index 00000000..13df907d --- /dev/null +++ b/services/data_process_yuan/include/pipeline/dcamera_pipeline_sink.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DCAMERA_PIPELINE_SINK_H +#define OHOS_DCAMERA_PIPELINE_SINK_H + +#include +#include + +#include "event.h" +#include "event_bus.h" +#include "event_sender.h" +#include "eventbus_handler.h" + +#include "data_buffer.h" +#include "image_common_type.h" +#include "distributed_camera_errno.h" +#include "dcamera_pipeline_event.h" +#include "idata_process_pipeline.h" +#include "abstract_data_process.h" +#include "data_process_listener.h" + +namespace OHOS { +namespace DistributedHardware { +class EncodeDataProcess; + +class DCameraPipelineSink : public IDataProcessPipeline, public std::enable_shared_from_this { +public: + ~DCameraPipelineSink(); + + int32_t CreateDataProcessPipeline(PipelineType piplineType, const VideoConfigParams& sourceConfig, + const VideoConfigParams& targetConfig, const std::shared_ptr& listener) override; + int32_t ProcessData(std::vector>& dataBuffers) override; + void DestroyDataProcessPipeline() override; + + void OnError(DataProcessErrorType errorType); + void OnProcessedVideoBuffer(const std::shared_ptr& videoResult); + +private: + bool IsInRange(const VideoConfigParams& curConfig); + int32_t InitDCameraPipNodes(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig); + +private: + const static std::string PIPELINE_OWNER; + const static uint32_t MAX_FRAME_RATE = 30; + const static uint32_t MIN_VIDEO_WIDTH = 320; + const static uint32_t MIN_VIDEO_HEIGHT = 240; + const static uint32_t MAX_VIDEO_WIDTH = 1920; + const static uint32_t MAX_VIDEO_HEIGHT = 1080; + + std::shared_ptr processListener_ = nullptr; + std::shared_ptr pipelineHead_ = nullptr; + + bool isProcess_ = false; + PipelineType piplineType_ = PipelineType::VIDEO; + std::vector> pipNodeRanks_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DCAMERA_PIPELINE_SINK_H diff --git a/services/data_process_yuan/include/pipeline/dcamera_pipeline_source.h b/services/data_process_yuan/include/pipeline/dcamera_pipeline_source.h 
new file mode 100644 index 00000000..3e665141 --- /dev/null +++ b/services/data_process_yuan/include/pipeline/dcamera_pipeline_source.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef OHOS_DCAMERA_PIPELINE_SOURCE_H +#define OHOS_DCAMERA_PIPELINE_SOURCE_H + +#include +#include + +#include "event.h" +#include "event_bus.h" +#include "event_sender.h" +#include "eventbus_handler.h" + +#include "data_buffer.h" +#include "image_common_type.h" +#include "distributed_camera_errno.h" +#include "dcamera_pipeline_event.h" +#include "idata_process_pipeline.h" +#include "abstract_data_process.h" +#include "data_process_listener.h" + +namespace OHOS { +namespace DistributedHardware { +class DecodeDataProcess; + +class DCameraPipelineSource : public EventSender, public EventBusHandler, + public IDataProcessPipeline, public std::enable_shared_from_this { +public: + ~DCameraPipelineSource(); + + int32_t CreateDataProcessPipeline(PipelineType piplineType, const VideoConfigParams& sourceConfig, + const VideoConfigParams& targetConfig, const std::shared_ptr& listener) override; + int32_t ProcessData(std::vector>& dataBuffers) override; + void DestroyDataProcessPipeline() override; + void OnEvent(DCameraPipelineEvent& ev) override; + + void OnError(DataProcessErrorType errorType); + void OnProcessedVideoBuffer(const std::shared_ptr& videoResult); + +private: + bool IsInRange(const VideoConfigParams& 
curConfig); + void InitDCameraPipEvent(); + int32_t InitDCameraPipNodes(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig); + +private: + const static std::string PIPELINE_OWNER; + const static uint32_t MAX_FRAME_RATE = 30; + const static uint32_t MIN_VIDEO_WIDTH = 320; + const static uint32_t MIN_VIDEO_HEIGHT = 240; + const static uint32_t MAX_VIDEO_WIDTH = 1920; + const static uint32_t MAX_VIDEO_HEIGHT = 1080; + + std::shared_ptr processListener_ = nullptr; + std::shared_ptr pipelineHead_ = nullptr; + std::shared_ptr eventBusSource_ = nullptr; + + bool isProcess_ = false; + PipelineType piplineType_ = PipelineType::VIDEO; + std::vector> pipNodeRanks_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DCAMERA_PIPELINE_SOURCE_H diff --git a/services/data_process_yuan/include/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.h b/services/data_process_yuan/include/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.h new file mode 100644 index 00000000..515da244 --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_CONVERT_NV12TONV21_H +#define OHOS_CONVERT_NV12TONV21_H + +#include "securec.h" +#include "data_buffer.h" +#include "image_common_type.h" + +namespace OHOS { +namespace DistributedHardware { +class ConvertNV12ToNV21 { +public: + ConvertNV12ToNV21() = default; + ~ConvertNV12ToNV21() = default; + std::shared_ptr ProcessData(const std::shared_ptr& srcBuf, + const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig); + +private: + bool IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig); + int32_t GetImageUnitInfo(ImageUnitInfo& imgInfo, const std::shared_ptr& imgBuf); + bool IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo); + int32_t CheckColorConvertInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + void SeparateUVPlaneByRow(const uint8_t *srcUVPlane, uint8_t *dstUPlane, uint8_t *dstVPlane, + int32_t srcHalfWidth); + int32_t SeparateNV12UVPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + void CombineUVPlaneByRow(const uint8_t *srcUPlane, const uint8_t *srcVPlane, uint8_t *dstUVPlane, + int32_t dstHalfWidth); + int32_t CombineNV12UVPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + int32_t CopyYPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + int32_t ColorConvertNV12ToNV21(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_CONVERT_NV12TONV21_H diff --git a/services/data_process_yuan/include/pipeline_node/fpscontroller/fps_controller_process.h b/services/data_process_yuan/include/pipeline_node/fpscontroller/fps_controller_process.h new file mode 100644 index 00000000..1dec54d6 --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/fpscontroller/fps_controller_process.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef OHOS_FPS_CONTROLLER_PROCESS_H +#define OHOS_FPS_CONTROLLER_PROCESS_H + +#include +#include + +#include "abstract_data_process.h" +#include "dcamera_pipeline_source.h" + +namespace OHOS { +namespace DistributedHardware { +class DCameraPipelineSource; + +class FpsControllerProcess : public AbstractDataProcess { +public: + FpsControllerProcess(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig, + const std::weak_ptr& callbackPipSource) + : sourceConfig_(sourceConfig), targetConfig_(targetConfig), callbackPipelineSource_(callbackPipSource) {} + ~FpsControllerProcess(); + + int32_t InitNode() override; + int32_t ProcessData(std::vector>& inputBuffers) override; + void ReleaseProcessNode() override; + +private: + void UpdateFPSControllerInfo(int64_t nowMs); + void UpdateFrameRateCorrectionFactor(int64_t nowMs); + void UpdateIncomingFrameTimes(int64_t nowMs); + float CalculateFrameRate(int64_t nowMs); + bool IsDropFrame(float incomingFps); + bool ReduceFrameRateByUniformStrategy(int32_t incomingFps); + int32_t FpsControllerDone(std::vector> outputBuffers); + +private: + const static uint32_t MAX_TARGET_FRAME_RATE = 30; + const static int32_t VIDEO_FRAME_DROP_INTERVAL = 4; + const static int32_t MIN_INCOME_FRAME_NUM_COEFFICIENT = 3; + const static int32_t INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE = 60; + /* Receive video frame detect time windows */ + const static int32_t 
FRAME_HISTORY_TIME_WINDOWS_MS = 2000; + const static int64_t FRMAE_MAX_INTERVAL_TIME_WINDOW_MS = 700; + const static int32_t OVERSHOOT_MODIFY_COEFFICIENT = 3; + const static int32_t DOUBLE_MULTIPLE = 2; + + std::mutex mtx; + VideoConfigParams sourceConfig_; + VideoConfigParams targetConfig_; + std::weak_ptr callbackPipelineSource_; + bool isFpsControllerProcess_ = false; + bool isFirstFrame_ = false; + uint32_t targetFrameRate_ = 0; + int64_t lastFrameIncomeTimeMs_ = 0; + /* the time span between current and last frame */ + int64_t recentFrameTimeSpanMs_ = -1; + int32_t keepCorrectionCount_ = 0; + int32_t keepLessThanDoubleCount_ = 0; + int32_t keepMoreThanDoubleCount_ = 0; + float frameRateCorrectionFactor_ = 0.0; + /* modify the frame rate controller argument */ + int32_t frameRateOvershootMdf_ = 0; + int64_t incomingFrameTimesMs_[INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE]; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_FPS_CONTROLLER_PROCESS_H \ No newline at end of file diff --git a/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_data_process.h b/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_data_process.h new file mode 100644 index 00000000..2c1e95bc --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_data_process.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DECODE_DATA_PROCESS_H +#define OHOS_DECODE_DATA_PROCESS_H + +#include "securec.h" +#include +#include +#include +#include +#include + +#include "surface.h" +#include "media_errors.h" +#include "avcodec_common.h" +#include "format.h" +#include "avsharedmemory.h" +#include "avcodec_video_decoder.h" +#include "event.h" +#include "event_bus.h" +#include "event_sender.h" +#include "eventbus_handler.h" +#include "event_registration.h" + +#include "data_buffer.h" +#include "distributed_camera_errno.h" +#include "image_common_type.h" +#include "dcamera_codec_event.h" +#include "abstract_data_process.h" +#include "dcamera_pipeline_source.h" + +namespace OHOS { +namespace DistributedHardware { +class DCameraPipelineSource; +class DecodeVideoCallback; + +class DecodeDataProcess : public EventSender, public EventBusHandler, public AbstractDataProcess, + public std::enable_shared_from_this { +public: + DecodeDataProcess(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig, + const std::shared_ptr& eventBusPipeline, + const std::weak_ptr& callbackPipSource) + : sourceConfig_(sourceConfig), targetConfig_(targetConfig), eventBusPipeline_(eventBusPipeline), + callbackPipelineSource_(callbackPipSource) {} + ~DecodeDataProcess(); + + int32_t InitNode() override; + int32_t ProcessData(std::vector>& inputBuffers) override; + void ReleaseProcessNode() override; + void OnEvent(DCameraCodecEvent& ev) override; + + void OnError(); + void OnInputBufferAvailable(uint32_t index); + void OnOutputFormatChanged(const Media::Format &format); + void OnOutputBufferAvailable(uint32_t index, const Media::AVCodecBufferInfo& info, + const Media::AVCodecBufferFlag& flag); + void GetDecoderOutputBuffer(const sptr& surface); + VideoConfigParams GetSourceConfig() const; + VideoConfigParams GetTargetConfig() const; + +private: + bool IsInDecoderRange(const VideoConfigParams& curConfig); + bool IsConvertible(const VideoConfigParams& sourceConfig, const 
VideoConfigParams& targetConfig); + void InitCodecEvent(); + int32_t InitDecoder(); + int32_t InitDecoderMetadataFormat(); + int32_t SetDecoderOutputSurface(); + int32_t FeedDecoderInputBuffer(); + int64_t GetDecoderTimeStamp(); + int32_t GetAlignedHeight(); + void CopyDecodedImage(const sptr& surBuf, int64_t timeStampUs, int32_t alignedWidth, + int32_t alignedHeight); + int32_t CopyYUVPlaneByRow(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + int32_t CheckCopyImageInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo); + bool IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo); + void PostOutputDataBuffers(std::shared_ptr& outputBuffer); + int32_t DecodeDone(std::vector> outputBuffers); + +private: + const static int32_t VIDEO_DECODER_QUEUE_MAX = 1000; + const static int32_t MAX_YUV420_BUFFER_SIZE = 1920 * 1080 * 3 / 2 * 2; + const static uint32_t MAX_FRAME_RATE = 30; + const static uint32_t MIN_VIDEO_WIDTH = 320; + const static uint32_t MIN_VIDEO_HEIGHT = 240; + const static uint32_t MAX_VIDEO_WIDTH = 1920; + const static uint32_t MAX_VIDEO_HEIGHT = 1080; + const static int32_t FIRST_FRAME_INPUT_NUM = 2; + + std::mutex mtxDecoderState_; + std::mutex mtxHoldCount_; + VideoConfigParams sourceConfig_; + VideoConfigParams targetConfig_; + std::shared_ptr eventBusPipeline_; + std::weak_ptr callbackPipelineSource_; + std::shared_ptr eventBusDecode_ = nullptr; + std::shared_ptr eventBusRegHandleDecode_ = nullptr; + std::shared_ptr eventBusRegHandlePipeline2Decode_ = nullptr; + std::shared_ptr videoDecoder_ = nullptr; + std::shared_ptr decodeVideoCallback_ = nullptr; + sptr decodeConsumerSurface_ = nullptr; + sptr decodeProducerSurface_ = nullptr; + sptr decodeSurfaceListener_ = nullptr; + + bool isDecoderProcess_ = false; + int32_t waitDecoderOutputCount_ = 0; + int32_t alignedHeight_ = 0; + int64_t lastFeedDecoderInputBufferTimeUs_ = 0; + int64_t outputTimeStampUs_ = 0; + std::string processType_; + Media::Format metadataFormat_; + 
Media::Format decodeOutputFormat_; + Media::AVCodecBufferInfo outputInfo_; + std::queue> inputBuffersQueue_; + std::queue availableInputIndexsQueue_; +}; + +class DecodeSurfaceListener : public IBufferConsumerListener { +public: + DecodeSurfaceListener(sptr surface, std::weak_ptr decodeVideoNode) + : surface_(surface), decodeVideoNode_(decodeVideoNode) {} + ~DecodeSurfaceListener(); + + void OnBufferAvailable() override; + void SetSurface(const sptr& surface); + void SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode); + +private: + sptr surface_; + std::weak_ptr decodeVideoNode_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DECODE_DATA_PROCESS_H diff --git a/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_video_callback.h b/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_video_callback.h new file mode 100644 index 00000000..cec129eb --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/multimedia_codec/decode_video_callback.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DECODE_VIDEO_CALLBACK_H +#define OHOS_DECODE_VIDEO_CALLBACK_H + +#include "media_errors.h" +#include "avcodec_common.h" +#include "format.h" + +#include "decode_data_process.h" + +namespace OHOS { +namespace DistributedHardware { +class DecodeDataProcess; + +class DecodeVideoCallback : public Media::AVCodecCallback { +public: + explicit DecodeVideoCallback(const std::weak_ptr& decodeVideoNode) + : decodeVideoNode_(decodeVideoNode) {} + ~DecodeVideoCallback() = default; + + void OnError(Media::AVCodecErrorType errorType, int32_t errorCode) override; + void OnInputBufferAvailable(uint32_t index) override; + void OnOutputFormatChanged(const Media::Format &format) override; + void OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) override; +private: + std::weak_ptr decodeVideoNode_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DECODE_VIDEO_CALLBACK_H diff --git a/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_data_process.h b/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_data_process.h new file mode 100644 index 00000000..8d87fc5e --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_data_process.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_ENCODE_DATA_PROCESS_H +#define OHOS_ENCODE_DATA_PROCESS_H + +#include "securec.h" +#include +#include +#include + +#include "surface.h" +#include "media_errors.h" +#include "avcodec_common.h" +#include "format.h" +#include "avsharedmemory.h" +#include "avcodec_video_encoder.h" + +#include "data_buffer.h" +#include "distributed_camera_errno.h" +#include "image_common_type.h" +#include "abstract_data_process.h" +#include "dcamera_pipeline_sink.h" + +namespace OHOS { +namespace DistributedHardware { +class DCameraPipelineSink; +class EncodeVideoCallback; + +class EncodeDataProcess : public AbstractDataProcess, public std::enable_shared_from_this { +public: + EncodeDataProcess(const VideoConfigParams &sourceConfig, const VideoConfigParams &targetConfig, + const std::weak_ptr& callbackPipSink) + : sourceConfig_(sourceConfig), targetConfig_(targetConfig), callbackPipelineSink_(callbackPipSink) {} + ~EncodeDataProcess(); + + int32_t InitNode() override; + int32_t ProcessData(std::vector>& inputBuffers) override; + void ReleaseProcessNode() override; + + void OnError(); + void OnInputBufferAvailable(uint32_t index); + void OnOutputFormatChanged(const Media::Format &format); + void OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, Media::AVCodecBufferFlag flag); + VideoConfigParams GetSourceConfig() const; + VideoConfigParams GetTargetConfig() const; + +private: + bool IsInEncoderRange(const VideoConfigParams& curConfig); + bool IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig); + int32_t InitEncoder(); + int32_t InitEncoderMetadataFormat(); + int32_t InitEncoderBitrateFormat(); + int32_t FeedEncoderInputBuffer(std::shared_ptr& inputBuffer); + sptr GetEncoderInputSurfaceBuffer(); + int64_t GetEncoderTimeStamp(); + int32_t GetEncoderOutputBuffer(uint32_t index, Media::AVCodecBufferInfo info); + int32_t EncodeDone(std::vector> outputBuffers); + +private: + const static int32_t 
ENCODER_STRIDE_ALIGNMENT = 8; + const static int64_t NORM_YUV420_BUFFER_SIZE = 1920 * 1080 * 3 / 2; + const static uint32_t MAX_FRAME_RATE = 30; + const static uint32_t MIN_VIDEO_WIDTH = 320; + const static uint32_t MIN_VIDEO_HEIGHT = 240; + const static uint32_t MAX_VIDEO_WIDTH = 1920; + const static uint32_t MAX_VIDEO_HEIGHT = 1080; + const static int32_t IDR_FRAME_INTERVAL_MS = 300; + const static int32_t FIRST_FRAME_OUTPUT_NUM = 2; + + const static int64_t WIDTH_320_HEIGHT_240 = 320 * 240; + const static int64_t WIDTH_480_HEIGHT_360 = 480 * 360; + const static int64_t WIDTH_640_HEIGHT_360 = 640 * 360; + const static int64_t WIDTH_640_HEIGHT_480 = 640 * 480; + const static int64_t WIDTH_720_HEIGHT_540 = 720 * 540; + const static int64_t WIDTH_960_HEIGHT_540 = 960 * 540; + const static int64_t WIDTH_960_HEIGHT_720 = 960 * 720; + const static int64_t WIDTH_1280_HEIGHT_720 = 1280 * 720; + const static int64_t WIDTH_1440_HEIGHT_1080 = 1440 * 1080; + const static int64_t WIDTH_1920_HEIGHT_1080 = 1920 * 1080; + const static int32_t BITRATE_500000 = 500000; + const static int32_t BITRATE_1110000 = 1110000; + const static int32_t BITRATE_1500000 = 1500000; + const static int32_t BITRATE_1800000 = 1800000; + const static int32_t BITRATE_2100000 = 2100000; + const static int32_t BITRATE_2300000 = 2300000; + const static int32_t BITRATE_2800000 = 2800000; + const static int32_t BITRATE_3400000 = 3400000; + const static int32_t BITRATE_5000000 = 5000000; + const static int32_t BITRATE_6000000 = 6000000; + const static std::map ENCODER_BITRATE_TABLE; + + std::mutex mtxEncoderState_; + std::mutex mtxHoldCount_; + VideoConfigParams sourceConfig_; + VideoConfigParams targetConfig_; + std::weak_ptr callbackPipelineSink_; + std::shared_ptr videoEncoder_ = nullptr; + std::shared_ptr encodeVideoCallback_ = nullptr; + sptr encodeProducerSurface_ = nullptr; + + bool isEncoderProcess_ = false; + int32_t waitEncoderOutputCount_ = 0; + int64_t lastFeedEncoderInputBufferTimeUs_ = 0; + 
int64_t inputTimeStampUs_ = 0; + std::string processType_; + Media::Format metadataFormat_; + Media::Format encodeOutputFormat_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_ENCODE_DATA_PROCESS_H diff --git a/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_video_callback.h b/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_video_callback.h new file mode 100644 index 00000000..34f6fc11 --- /dev/null +++ b/services/data_process_yuan/include/pipeline_node/multimedia_codec/encode_video_callback.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_DECODE_VIDEO_CALLBACK_H +#define OHOS_DECODE_VIDEO_CALLBACK_H + +#include "media_errors.h" +#include "avcodec_common.h" +#include "format.h" + +#include "encode_data_process.h" + +namespace OHOS { +namespace DistributedHardware { +class EncodeDataProcess; + +class EncodeVideoCallback : public Media::AVCodecCallback { +public: + explicit EncodeVideoCallback(const std::weak_ptr& encodeVideoNode) + : encodeVideoNode_(encodeVideoNode) {} + ~EncodeVideoCallback() = default; + + void OnError(Media::AVCodecErrorType errorType, int32_t errorCode) override; + void OnInputBufferAvailable(uint32_t index) override; + void OnOutputFormatChanged(const Media::Format &format) override; + void OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) override; +private: + std::weak_ptr encodeVideoNode_; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_DECODE_VIDEO_CALLBACK_H diff --git a/services/data_process_yuan/include/utils/image_common_type.h b/services/data_process_yuan/include/utils/image_common_type.h new file mode 100644 index 00000000..9e302d96 --- /dev/null +++ b/services/data_process_yuan/include/utils/image_common_type.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef OHOS_IMAGE_COMMON_TYPE_H +#define OHOS_IMAGE_COMMON_TYPE_H + +#include +#include + +namespace OHOS { +namespace DistributedHardware { +enum class PipelineType : int32_t { + VIDEO = 0, + PHOTO_JPEG, +}; + +enum class VideoCodecType : int32_t { + NO_CODEC = 0, + CODEC_H264, + CODEC_H265, +}; + +enum class Videoformat : int32_t { + YUVI420 = 0, + NV12, + NV21, +}; + +class VideoConfigParams { +public: + VideoConfigParams(VideoCodecType videoCodec, Videoformat pixelFormat, uint32_t frameRate, uint32_t width, + uint32_t height) + : videoCodec_(videoCodec), pixelFormat_(pixelFormat), frameRate_(frameRate), width_ (width), height_(height) + {} + ~VideoConfigParams() = default; + + void SetVideoCodecType(VideoCodecType videoCodec); + void SetVideoformat(Videoformat pixelFormat); + void SetFrameRate(uint32_t frameRate); + void SetWidthAndHeight(uint32_t width, uint32_t height); + VideoCodecType GetVideoCodecType() const; + Videoformat GetVideoformat() const; + uint32_t GetFrameRate() const; + uint32_t GetWidth() const; + uint32_t GetHeight() const; + +private: + VideoCodecType videoCodec_; + Videoformat pixelFormat_; + uint32_t frameRate_; + uint32_t width_; + uint32_t height_; +}; + +struct ImageUnitInfo { + Videoformat colorFormat; + int32_t width; + int32_t height; + int32_t alignedWidth; + int32_t alignedHeight; + size_t chromaOffset; + size_t imgSize; + uint8_t *imgData; +}; +} // namespace DistributedHardware +} // namespace OHOS +#endif // OHOS_IMAGE_COMMON_TYPE_H diff --git a/services/data_process_yuan/src/pipeline/abstract_data_process.cpp b/services/data_process_yuan/src/pipeline/abstract_data_process.cpp new file mode 100644 index 00000000..0ee2e7cc --- /dev/null +++ b/services/data_process_yuan/src/pipeline/abstract_data_process.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "abstract_data_process.h" + +#include "distributed_hardware_log.h" + +namespace OHOS { +namespace DistributedHardware { +int32_t AbstractDataProcess::SetNextNode(std::shared_ptr& nextDataProcess) +{ + if (nextDataProcess == nullptr) { + DHLOGE("Next data process is invalid."); + return DCAMERA_BAD_VALUE; + } + nextDataProcess_ = nextDataProcess; + return DCAMERA_OK; +} + +void AbstractDataProcess::SetNodeRank(size_t curNodeRank) +{ + nodeRank_ = curNodeRank; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline/dcamera_pipeline_sink.cpp b/services/data_process_yuan/src/pipeline/dcamera_pipeline_sink.cpp new file mode 100644 index 00000000..afe508fa --- /dev/null +++ b/services/data_process_yuan/src/pipeline/dcamera_pipeline_sink.cpp @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dcamera_pipeline_sink.h" + +#include "distributed_hardware_log.h" + +#include "encode_data_process.h" + +namespace OHOS { +namespace DistributedHardware { +const std::string DCameraPipelineSink::PIPELINE_OWNER = "Sink"; + +DCameraPipelineSink::~DCameraPipelineSink() +{ + if (isProcess_) { + DHLOGD("~DCameraPipelineSink : Destroy sink data process pipeline."); + DestroyDataProcessPipeline(); + } +} + +int32_t DCameraPipelineSink::CreateDataProcessPipeline(PipelineType piplineType, + const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig, + const std::shared_ptr& listener) +{ + DHLOGD("Create sink data process pipeline."); + switch (piplineType) { + case PipelineType::VIDEO: + if (!(IsInRange(sourceConfig) && IsInRange(targetConfig))) { + DHLOGE("Source config or target config of sink pipeline are invalid."); + return DCAMERA_BAD_VALUE; + } + break; + default: + DHLOGE("JPEG or other pipeline type are not supported in sink pipeline."); + return DCAMERA_NOT_FOUND; + } + if (listener == nullptr) { + DHLOGE("The process listener of sink pipeline is empty."); + return DCAMERA_BAD_VALUE; + } + if (pipelineHead_ != nullptr) { + DHLOGD("The sink pipeline already exists."); + return DCAMERA_OK; + } + + int32_t err = InitDCameraPipNodes(sourceConfig, targetConfig); + if (err != DCAMERA_OK) { + DestroyDataProcessPipeline(); + return err; + } + piplineType_ = piplineType; + processListener_ = listener; + isProcess_ = true; + return DCAMERA_OK; +} + +bool DCameraPipelineSink::IsInRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetFrameRate() <= MAX_FRAME_RATE || curConfig.GetWidth() >= MIN_VIDEO_WIDTH || + curConfig.GetWidth() <= MAX_VIDEO_WIDTH || curConfig.GetHeight() >= MIN_VIDEO_HEIGHT || + curConfig.GetHeight() <= MAX_VIDEO_HEIGHT); +} + +int32_t DCameraPipelineSink::InitDCameraPipNodes(const VideoConfigParams& sourceConfig, + const VideoConfigParams& targetConfig) +{ + DHLOGD("Init sink DCamera pipeline Nodes."); + 
if (piplineType_ == PipelineType::PHOTO_JPEG) { + DHLOGE("JPEG data process is not supported."); + return DCAMERA_NOT_FOUND; + } + + pipNodeRanks_.push_back(std::make_shared(sourceConfig, targetConfig, shared_from_this())); + if (pipNodeRanks_.size() == 0) { + DHLOGD("Creating an empty sink pipeline."); + pipelineHead_ = nullptr; + return DCAMERA_BAD_VALUE; + } + for (size_t i = 0; i < pipNodeRanks_.size(); i++) { + pipNodeRanks_[i]->SetNodeRank(i); + int32_t err = pipNodeRanks_[i]->InitNode(); + if (err != DCAMERA_OK) { + DHLOGE("Init sink DCamera pipeline Node [%d] failed.", i); + return DCAMERA_INIT_ERR; + } + if (i == 0) { + continue; + } + err = pipNodeRanks_[i - 1]->SetNextNode(pipNodeRanks_[i]); + if (err != DCAMERA_OK) { + DHLOGE("Set the next node of Node [%d] failed in sink pipeline.", i - 1); + return DCAMERA_INIT_ERR; + } + } + DHLOGD("All nodes have been linked in sink pipeline."); + pipelineHead_ = pipNodeRanks_[0]; + return DCAMERA_OK; +} + +int32_t DCameraPipelineSink::ProcessData(std::vector>& dataBuffers) +{ + DHLOGD("Process data buffers in sink pipeline."); + if (piplineType_ == PipelineType::PHOTO_JPEG) { + DHLOGE("JPEG data process is not supported in sink pipeline."); + return DCAMERA_NOT_FOUND; + } + if (pipelineHead_ == nullptr) { + DHLOGE("The current sink pipeline node is empty. 
Processing failed."); + return DCAMERA_INIT_ERR; + } + if (dataBuffers.empty()) { + DHLOGE("Sink Pipeline Input Data buffers is null."); + return DCAMERA_BAD_VALUE; + } + if (!isProcess_) { + DHLOGE("Sink pipeline node occurred error or start destroy."); + return DCAMERA_DISABLE_PROCESS; + } + + int32_t err = pipelineHead_->ProcessData(dataBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Sink plpeline process data buffers fail."); + } + return err; +} + +void DCameraPipelineSink::DestroyDataProcessPipeline() +{ + DHLOGD("Destroy sink data process pipeline start."); + isProcess_ = false; + if (pipelineHead_ != nullptr) { + pipelineHead_->ReleaseProcessNode(); + pipelineHead_ = nullptr; + } + + pipNodeRanks_.clear(); + piplineType_ = PipelineType::VIDEO; + processListener_ = nullptr; + DHLOGD("Destroy sink data process pipeline end."); +} + +void DCameraPipelineSink::OnError(DataProcessErrorType errorType) +{ + DHLOGE("A runtime error occurred in sink pipeline."); + isProcess_ = false; + if (processListener_ == nullptr) { + DHLOGE("The process listener of sink pipeline is empty."); + return; + } + processListener_->OnError(errorType); +} + +void DCameraPipelineSink::OnProcessedVideoBuffer(const std::shared_ptr& videoResult) +{ + DHLOGD("Sink pipeline output the processed video buffer."); + if (processListener_ == nullptr) { + DHLOGE("The process listener of sink pipeline is empty."); + return; + } + processListener_->OnProcessedVideoBuffer(videoResult); +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline/dcamera_pipeline_source.cpp b/services/data_process_yuan/src/pipeline/dcamera_pipeline_source.cpp new file mode 100644 index 00000000..ddb80e5b --- /dev/null +++ b/services/data_process_yuan/src/pipeline/dcamera_pipeline_source.cpp @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dcamera_pipeline_source.h" + +#include "distributed_hardware_log.h" + +#include "decode_data_process.h" +#include "fps_controller_process.h" + +namespace OHOS { +namespace DistributedHardware { +const std::string DCameraPipelineSource::PIPELINE_OWNER = "Source"; + +DCameraPipelineSource::~DCameraPipelineSource() +{ + if (isProcess_) { + DHLOGD("~DCameraPipelineSource : Destroy source data process pipeline."); + DestroyDataProcessPipeline(); + } +} + +int32_t DCameraPipelineSource::CreateDataProcessPipeline(PipelineType piplineType, + const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig, + const std::shared_ptr& listener) +{ + DHLOGD("Create source data process pipeline."); + switch (piplineType) { + case PipelineType::VIDEO: + if (!(IsInRange(sourceConfig) && IsInRange(targetConfig))) { + DHLOGE("Source config or target config of source pipeline are invalid."); + return DCAMERA_BAD_VALUE; + } + break; + default: + DHLOGE("JPEG or other pipeline type are not supported in source pipeline."); + return DCAMERA_NOT_FOUND; + } + if (listener == nullptr) { + DHLOGE("The process listener of source pipeline is empty."); + return DCAMERA_BAD_VALUE; + } + + if (pipelineHead_ != nullptr) { + DHLOGD("The source pipeline already exists."); + return DCAMERA_OK; + } + + InitDCameraPipEvent(); + int32_t err = InitDCameraPipNodes(sourceConfig, targetConfig); + if (err != DCAMERA_OK) { + 
DestroyDataProcessPipeline(); + return err; + } + piplineType_ = piplineType; + processListener_ = listener; + isProcess_ = true; + return DCAMERA_OK; +} + +bool DCameraPipelineSource::IsInRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetFrameRate() <= MAX_FRAME_RATE || curConfig.GetWidth() >= MIN_VIDEO_WIDTH || + curConfig.GetWidth() <= MAX_VIDEO_WIDTH || curConfig.GetHeight() >= MIN_VIDEO_HEIGHT || + curConfig.GetHeight() <= MAX_VIDEO_HEIGHT); +} + +void DCameraPipelineSource::InitDCameraPipEvent() +{ + DHLOGD("Init source DCamera pipeline event to asynchronously process data."); + eventBusSource_ = std::make_shared(); + DCameraPipelineEvent pipelineEvent(*this, std::make_shared()); + eventBusSource_->AddHandler(pipelineEvent.GetType(), *this); +} + +int32_t DCameraPipelineSource::InitDCameraPipNodes(const VideoConfigParams& sourceConfig, + const VideoConfigParams& targetConfig) +{ + DHLOGD("Init source DCamera pipeline Nodes."); + if (piplineType_ == PipelineType::PHOTO_JPEG) { + DHLOGE("JPEG data process is not supported."); + return DCAMERA_NOT_FOUND; + } + if (eventBusSource_ == nullptr) { + DHLOGE("eventBusSource is nullptr."); + return DCAMERA_BAD_VALUE; + } + pipNodeRanks_.push_back(std::make_shared(sourceConfig, targetConfig, + eventBusSource_, shared_from_this())); + if (pipNodeRanks_.size() == 0) { + DHLOGD("Creating an empty source pipeline."); + pipelineHead_ = nullptr; + return DCAMERA_BAD_VALUE; + } + for (size_t i = 0; i < pipNodeRanks_.size(); i++) { + pipNodeRanks_[i]->SetNodeRank(i); + int32_t err = pipNodeRanks_[i]->InitNode(); + if (err != DCAMERA_OK) { + DHLOGE("Init source DCamera pipeline Node [%d] failed.", i); + return DCAMERA_INIT_ERR; + } + if (i == 0) { + continue; + } + err = pipNodeRanks_[i - 1]->SetNextNode(pipNodeRanks_[i]); + if (err != DCAMERA_OK) { + DHLOGE("Set the next node of Node [%d] failed in source pipeline.", i - 1); + return DCAMERA_INIT_ERR; + } + } + DHLOGD("All nodes have been linked in source 
pipeline."); + pipelineHead_ = pipNodeRanks_[0]; + return DCAMERA_OK; +} + +int32_t DCameraPipelineSource::ProcessData(std::vector>& dataBuffers) +{ + DHLOGD("Process data buffers in source pipeline."); + if (piplineType_ == PipelineType::PHOTO_JPEG) { + DHLOGE("JPEG data process is not supported in source pipeline."); + return DCAMERA_NOT_FOUND; + } + if (pipelineHead_ == nullptr) { + DHLOGE("The current source pipeline node is empty. Processing failed."); + return DCAMERA_INIT_ERR; + } + if (dataBuffers.empty()) { + DHLOGE("Source Pipeline Input data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + if (!isProcess_) { + DHLOGE("Source Pipeline node occurred error or start destroy."); + return DCAMERA_DISABLE_PROCESS; + } + + DHLOGD("Send asynchronous event to process data in source pipeline."); + std::shared_ptr pipConfigSource = std::make_shared(piplineType_, + PIPELINE_OWNER, dataBuffers); + DCameraPipelineEvent dCamPipelineEvent(*this, pipConfigSource); + if (eventBusSource_ == nullptr) { + DHLOGE("eventBusSource_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + eventBusSource_->PostEvent(dCamPipelineEvent, POSTMODE::POST_ASYNC); + return DCAMERA_OK; +} + +void DCameraPipelineSource::DestroyDataProcessPipeline() +{ + DHLOGD("Destroy source data process pipeline start."); + isProcess_ = false; + if (pipelineHead_ != nullptr) { + pipelineHead_->ReleaseProcessNode(); + pipelineHead_ = nullptr; + } + eventBusSource_ = nullptr; + processListener_ = nullptr; + pipNodeRanks_.clear(); + piplineType_ = PipelineType::VIDEO; + DHLOGD("Destroy source data process pipeline end."); +} + +void DCameraPipelineSource::OnEvent(DCameraPipelineEvent& ev) +{ + DHLOGD("Receive asynchronous event then start process data in source pipeline."); + std::shared_ptr pipelineConfig = ev.GetPipelineConfig(); + std::vector> inputBuffers = pipelineConfig->GetDataBuffers(); + if (inputBuffers.empty()) { + DHLOGE("Receiving process data buffers is empty in source pipeline."); + 
OnError(ERROR_PIPELINE_EVENTBUS); + return; + } + pipelineHead_->ProcessData(inputBuffers); +} + +void DCameraPipelineSource::OnError(DataProcessErrorType errorType) +{ + DHLOGE("A runtime error occurred in the source pipeline."); + isProcess_ = false; + if (processListener_ == nullptr) { + DHLOGE("The process listener of source pipeline is empty."); + return; + } + processListener_->OnError(errorType); +} + +void DCameraPipelineSource::OnProcessedVideoBuffer(const std::shared_ptr& videoResult) +{ + DHLOGD("Source pipeline output the processed video buffer."); + if (processListener_ == nullptr) { + DHLOGE("The process listener of source pipeline is empty."); + return; + } + processListener_->OnProcessedVideoBuffer(videoResult); +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp b/services/data_process_yuan/src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp new file mode 100644 index 00000000..3a023eff --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/colorspace_conversion/convert_nv12_to_nv21.cpp @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "convert_nv12_to_nv21.h" + +#include "distributed_hardware_log.h" + +#include "distributed_camera_errno.h" + +namespace OHOS { +namespace DistributedHardware { +bool ConvertNV12ToNV21::IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + return (sourceConfig.GetVideoformat() == Videoformat::NV12 && targetConfig.GetVideoformat() == Videoformat::NV21 && + sourceConfig.GetWidth() == targetConfig.GetWidth() && sourceConfig.GetHeight() == targetConfig.GetHeight()); +} + +int32_t ConvertNV12ToNV21::GetImageUnitInfo(ImageUnitInfo& imgInfo, const std::shared_ptr& imgBuf) +{ + if (imgBuf == nullptr) { + DHLOGE("GetImageUnitInfo failed, imgBuf is nullptr."); + return DCAMERA_BAD_VALUE; + } + + bool findErr = true; + int32_t colorFormat = 0; + findErr = findErr && imgBuf->FindInt32("Videoformat", colorFormat); + if (!findErr) { + DHLOGE("GetImageUnitInfo failed, Videoformat is null."); + return DCAMERA_NOT_FOUND; + } + if (colorFormat != static_cast(Videoformat::YUVI420) && + colorFormat != static_cast(Videoformat::NV12) && + colorFormat != static_cast(Videoformat::NV21)) { + DHLOGE("GetImageUnitInfo failed, colorFormat %d are not supported.", colorFormat); + return DCAMERA_NOT_FOUND; + } + imgInfo.colorFormat = static_cast(colorFormat); + findErr = findErr && imgBuf->FindInt32("width", imgInfo.width); + findErr = findErr && imgBuf->FindInt32("height", imgInfo.height); + findErr = findErr && imgBuf->FindInt32("alignedWidth", imgInfo.alignedWidth); + findErr = findErr && imgBuf->FindInt32("alignedHeight", imgInfo.alignedHeight); + if (!findErr) { + DHLOGE("GetImageUnitInfo failed, width %d, height %d, alignedWidth %d, alignedHeight %d.", + imgInfo.width, imgInfo.height, imgInfo.alignedWidth, imgInfo.alignedHeight); + return DCAMERA_NOT_FOUND; + } + + imgInfo.chromaOffset = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight); + imgInfo.imgSize = imgBuf->Size(); + imgInfo.imgData = imgBuf->Data(); + if 
(imgInfo.imgData == nullptr) { + DHLOGE("Get the imgData of the imgBuf failed."); + return DCAMERA_BAD_VALUE; + } + DHLOGD("imgBuf info : Videoformat %d, alignedWidth %d, alignedHeight %d, width %d, height %d, chromaOffset %d, " + + "imgSize %d.", imgInfo.colorFormat, imgInfo.width, imgInfo.height, imgInfo.alignedWidth, + imgInfo.alignedHeight, imgInfo.chromaOffset, imgInfo.imgSize); + return DCAMERA_OK; +} + +bool ConvertNV12ToNV21::IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo) +{ + int32_t y2UvRatio = 2; + int32_t bytesPerPixel = 3; + size_t expectedImgSize = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight * + bytesPerPixel / y2UvRatio); + size_t expectedChromaOffset = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight); + return (imgInfo.width <= imgInfo.alignedWidth && imgInfo.height <= imgInfo.alignedHeight && + imgInfo.imgSize >= expectedImgSize && imgInfo.chromaOffset == expectedChromaOffset); +} + +int32_t ConvertNV12ToNV21::CheckColorConvertInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + if (srcImgInfo.imgData == nullptr || dstImgInfo.imgData == nullptr) { + DHLOGE("The imgData of srcImgInfo or the imgData of dstImgInfo are null!"); + return DCAMERA_BAD_VALUE; + } + if (srcImgInfo.colorFormat != Videoformat::NV12 && dstImgInfo.colorFormat != Videoformat::NV21) { + DHLOGE("CopyInfo error : srcImgInfo colorFormat %d, dstImgInfo colorFormat %d.", + srcImgInfo.colorFormat, dstImgInfo.colorFormat); + return DCAMERA_BAD_VALUE; + } + + if (!IsCorrectImageUnitInfo(srcImgInfo)) { + DHLOGE("srcImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + + "imgSize %lld.", srcImgInfo.width, srcImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight, + srcImgInfo.chromaOffset, srcImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + if (!IsCorrectImageUnitInfo(dstImgInfo)) { + DHLOGE("dstImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + 
+ "imgSize %lld.", dstImgInfo.width, dstImgInfo.height, dstImgInfo.alignedWidth, dstImgInfo.alignedHeight, + dstImgInfo.chromaOffset, dstImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + + if (dstImgInfo.width > srcImgInfo.alignedWidth || dstImgInfo.height > srcImgInfo.alignedHeight) { + DHLOGE("Comparison ImgInfo fail: dstwidth %d, dstheight %d, srcAlignedWidth %d, srcAlignedHeight %d.", + dstImgInfo.width, dstImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight); + return DCAMERA_BAD_VALUE; + } + return DCAMERA_OK; +} + +/** +* @brief Separate a row of srcUVPlane into half a row of dstUPlane and half a row of dstVPlane. For example, +* converts the UVPlane memory arrangement of NV12 to the UV memory arrangement of YUVI420. Note that the +* stride and width of the dstImage must be the same. +*/ +void ConvertNV12ToNV21::SeparateUVPlaneByRow(const uint8_t *srcUVPlane, uint8_t *dstUPlane, uint8_t *dstVPlane, + int32_t srcHalfWidth) +{ + int32_t memoryOffset0 = 0; + int32_t memoryOffset1 = 1; + int32_t memoryOffset2 = 2; + int32_t memoryOffset3 = 3; + int32_t perSeparatebytes = 4; + for (int32_t x = 0; x < srcHalfWidth - 1; x += memoryOffset2) { + dstUPlane[x] = srcUVPlane[memoryOffset0]; + dstUPlane[x + memoryOffset1] = srcUVPlane[memoryOffset2]; + dstVPlane[x] = srcUVPlane[memoryOffset1]; + dstVPlane[x + memoryOffset1] = srcUVPlane[memoryOffset3]; + srcUVPlane += perSeparatebytes; + } + if (static_cast(srcHalfWidth) & 1) { + dstUPlane[srcHalfWidth - 1] = srcUVPlane[memoryOffset0]; + dstVPlane[srcHalfWidth - 1] = srcUVPlane[memoryOffset1]; + } +} + +int32_t ConvertNV12ToNV21::SeparateNV12UVPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t ret = CheckColorConvertInfo(srcImgInfo, dstImgInfo); + if (ret != DCAMERA_OK) { + DHLOGE("ColorConvert : CheckColorConvertInfo failed."); + return ret; + } + + int32_t y2UvRatio = 2; + uint8_t *srcUVPlane = srcImgInfo.imgData + srcImgInfo.chromaOffset; + int32_t srcUVStride = 
srcImgInfo.alignedWidth; + uint8_t *dstUPlane = dstImgInfo.imgData + dstImgInfo.chromaOffset; + int32_t dstUStride = dstImgInfo.alignedWidth / y2UvRatio; + uint8_t *dstVPlane = dstUPlane + (dstImgInfo.chromaOffset / y2UvRatio) / y2UvRatio; + int32_t dstVStride = dstImgInfo.alignedWidth / y2UvRatio; + int32_t width = srcImgInfo.width / y2UvRatio; + int32_t height = srcImgInfo.height / y2UvRatio; + DHLOGD("srcUVStride %d, dstUStride %d, dstVStride %d, src half width %d, src half height %d.", + srcUVStride, dstUStride, dstVStride, width, height); + + /* Negative height means invert the image. */ + if (height < 0) { + height = -height; + dstUPlane = dstUPlane + (height - 1) * dstUStride; + dstVPlane = dstVPlane + (height - 1) * dstVStride; + dstUStride = -dstUStride; + dstVStride = -dstVStride; + } + /* No black border of srcImage and dstImage, and the strides of srcImage and dstImage are equal. */ + if (srcUVStride == width * y2UvRatio && dstUStride == width && dstVStride == width) { + SeparateUVPlaneByRow(srcUVPlane, dstUPlane, dstVPlane, width * height); + return DCAMERA_OK; + } + /* Black borders exist in srcImage or dstImage. */ + for (int32_t y = 0; y < height; ++y) { + SeparateUVPlaneByRow(srcUVPlane, dstUPlane, dstVPlane, width); + dstUPlane += dstUStride; + dstVPlane += dstVStride; + srcUVPlane += srcUVStride; + } + return DCAMERA_OK; +} + +/** +* @brief Combine half a row of srcUPlane and half a row of srcVPlane into a row of dstUVPlane. For example, +* converts the UVPlane memory arrangement of YUVI420 to the UV memory arrangement of NV12. Note that the +* stride and width of the srcImage must be the same. 
+*/ +void ConvertNV12ToNV21::CombineUVPlaneByRow(const uint8_t *srcUPlane, const uint8_t *srcVPlane, uint8_t *dstUVPlane, + int32_t dstHalfWidth) +{ + int32_t memoryOffset0 = 0; + int32_t memoryOffset1 = 1; + int32_t memoryOffset2 = 2; + int32_t memoryOffset3 = 3; + int32_t perCombinebytes = 4; + for (int32_t x = 0; x < dstHalfWidth - 1; x += memoryOffset2) { + dstUVPlane[memoryOffset0] = srcUPlane[x]; + dstUVPlane[memoryOffset1] = srcVPlane[x]; + dstUVPlane[memoryOffset2] = srcUPlane[x + memoryOffset1]; + dstUVPlane[memoryOffset3] = srcVPlane[x + memoryOffset1]; + dstUVPlane += perCombinebytes; + } + if (static_cast(dstHalfWidth) & 1) { + dstUVPlane[memoryOffset0] = srcUPlane[dstHalfWidth - 1]; + dstUVPlane[memoryOffset1] = srcVPlane[dstHalfWidth - 1]; + } +} + +int32_t ConvertNV12ToNV21::CombineNV12UVPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t ret = CheckColorConvertInfo(srcImgInfo, dstImgInfo); + if (ret != DCAMERA_OK) { + DHLOGE("ColorConvert : CheckColorConvertInfo failed."); + return ret; + } + + int32_t y2UvRatio = 2; + uint8_t *srcVPlane = srcImgInfo.imgData + srcImgInfo.chromaOffset; + int32_t srcVStride = srcImgInfo.alignedWidth / y2UvRatio; + uint8_t *srcUPlane = srcVPlane + (srcImgInfo.chromaOffset / y2UvRatio) / y2UvRatio; + int32_t srcUStride = srcImgInfo.alignedWidth / y2UvRatio; + uint8_t *dstUVPlane = dstImgInfo.imgData + dstImgInfo.chromaOffset; + int32_t dstUVStride = dstImgInfo.alignedWidth; + int32_t width = dstImgInfo.width / y2UvRatio; + int32_t height = dstImgInfo.height / y2UvRatio; + DHLOGD("srcUStride %d, srcVStride %d, dstUVStride %d, dst half width %d, dst half height %d.", + srcUStride, srcVStride, dstUVStride, width, height); + + /* Negative height means invert the image. 
*/ + if (height < 0) { + height = -height; + dstUVPlane = dstUVPlane + (height - 1) * dstUVStride; + dstUVStride = -dstUVStride; + } + /* No black border of srcImage and dstImage, and the strides of srcImage and dstImage are equal. */ + if (srcUStride == width && srcVStride == width && dstUVStride == width * y2UvRatio) { + CombineUVPlaneByRow(srcUPlane, srcVPlane, dstUVPlane, width * height); + return DCAMERA_OK; + } + /* Black borders exist in srcImage or dstImage. */ + for (int32_t y = 0; y < height; ++y) { + CombineUVPlaneByRow(srcUPlane, srcVPlane, dstUVPlane, width); + srcUPlane += srcUStride; + srcVPlane += srcVStride; + dstUVPlane += dstUVStride; + } + return DCAMERA_OK; +} + +int32_t ConvertNV12ToNV21::CopyYPlane(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t ret = CheckColorConvertInfo(srcImgInfo, dstImgInfo); + if (ret != DCAMERA_OK) { + DHLOGE("ColorConvert : CheckColorConvertInfo failed."); + return ret; + } + + errno_t err = EOK; + size_t totalCopyYPlaneSize = dstImgInfo.alignedWidth * dstImgInfo.height; + if (srcImgInfo.alignedWidth == dstImgInfo.width && dstImgInfo.alignedWidth == dstImgInfo.width) { + /* No black border of srcImage and dstImage, and the strides of srcImage and dstImage are equal. */ + err = memcpy_s(dstImgInfo.imgData, totalCopyYPlaneSize, srcImgInfo.imgData, totalCopyYPlaneSize); + if (err != EOK) { + DHLOGE("ColorConvert : memcpy_s CopyYPlaner failed by Coalesce rows."); + return DCAMERA_MEMORY_OPT_ERROR; + } + } else { + /* Black borders exist in srcImage or dstImage. 
*/ + int32_t srcDataOffset = 0; + int32_t dstDataOffset = 0; + for (int32_t yh = 0; yh < dstImgInfo.height; yh++) { + DHLOGE("ColorConvert : memcpy_s Line[%d] source buffer failed.", yh); + err = memcpy_s(dstImgInfo.imgData + dstDataOffset, totalCopyYPlaneSize - dstDataOffset, + srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); + if (err != EOK) { + DHLOGE("memcpy_s YPlane in line[%d] failed.", yh); + return DCAMERA_MEMORY_OPT_ERROR; + } + dstDataOffset += dstImgInfo.alignedWidth; + srcDataOffset += srcImgInfo.alignedWidth; + } + DHLOGD("ColorConvert :get valid yplane OK, srcImgInfo: alignedWidth %d, width %d, height %d. " + + "dstImgInfo: alignedWidth %d, width %d, height %d. dstDataOffset %d, srcDataOffset %d.", + srcImgInfo.alignedWidth, srcImgInfo.width, srcImgInfo.height, dstImgInfo.alignedWidth, + dstImgInfo.width, dstImgInfo.height, dstDataOffset, srcDataOffset); + } + return DCAMERA_OK; +} + +int32_t ConvertNV12ToNV21::ColorConvertNV12ToNV21(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t err = CheckColorConvertInfo(srcImgInfo, dstImgInfo); + if (err != DCAMERA_OK) { + DHLOGE("ColorConvertNV12ToNV21 : CheckColorConvertInfo failed."); + return err; + } + err = CopyYPlane(srcImgInfo, dstImgInfo); + if (err != DCAMERA_OK) { + DHLOGE("ColorConvertNV12ToNV21 : CopyYPlane failed."); + return err; + } + + std::shared_ptr tempPlaneYUV = std::make_shared(dstImgInfo.imgSize); + ImageUnitInfo tempImgInfo = dstImgInfo; + tempImgInfo.imgData = tempPlaneYUV->Data(); + SeparateNV12UVPlane(srcImgInfo, tempImgInfo); + CombineNV12UVPlane(tempImgInfo, dstImgInfo); + return DCAMERA_OK; +} + +std::shared_ptr ConvertNV12ToNV21::ProcessData(const std::shared_ptr& srcBuf, + const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + if (srcBuf == nullptr) { + DHLOGE("ColorConvertProcessData : srcBuf is null."); + return nullptr; + } + if (!IsConvertible(sourceConfig, targetConfig)) { + DHLOGE("ColorConvertProcessData : 
Only supported convert videoformat NV12 to NV21."); + DHLOGE("sourceConfig: Videoformat %d Width %d, Height %d, targetConfig: Videoformat %d Width %d, Height %d.", + sourceConfig.GetVideoformat(), sourceConfig.GetWidth(), sourceConfig.GetHeight(), + targetConfig.GetVideoformat(), targetConfig.GetWidth(), targetConfig.GetHeight()); + return nullptr; + } + int64_t timeStamp = 0; + if (!(srcBuf->FindInt64("timeUs", timeStamp))) { + DHLOGE("ColorConvertProcessData : Find srcBuf timeStamp failed."); + return nullptr; + } + + ImageUnitInfo srcImgInfo {Videoformat::YUVI420, 0, 0, 0, 0, 0, 0, nullptr}; + if (GetImageUnitInfo(srcImgInfo, srcBuf) != DCAMERA_OK) { + DHLOGE("ColorConvertProcessData : Get srcImgInfo failed."); + return nullptr; + } + int32_t y2UvRatio = 2; + int32_t bytesPerPixel = 3; + size_t dstBufsize = sourceConfig.GetWidth() * sourceConfig.GetHeight() * bytesPerPixel / y2UvRatio; + std::shared_ptr dstBuf = std::make_shared(dstBufsize); + ImageUnitInfo dstImgInfo = { targetConfig.GetVideoformat(), static_cast(sourceConfig.GetWidth()), + static_cast(sourceConfig.GetHeight()), static_cast(sourceConfig.GetWidth()), + static_cast(sourceConfig.GetHeight()), sourceConfig.GetWidth() * sourceConfig.GetHeight(), + dstBuf->Size(), dstBuf->Data() }; + int32_t err = ColorConvertNV12ToNV21(srcImgInfo, dstImgInfo); + if (err != DCAMERA_OK) { + return nullptr; + } + dstBuf->SetInt64("timeUs", timeStamp); + dstBuf->SetInt32("Videoformat", static_cast(targetConfig.GetVideoformat())); + dstBuf->SetInt32("alignedWidth", static_cast(sourceConfig.GetWidth())); + dstBuf->SetInt32("alignedHeight", static_cast(sourceConfig.GetHeight())); + dstBuf->SetInt32("width", static_cast(sourceConfig.GetWidth())); + dstBuf->SetInt32("height", static_cast(sourceConfig.GetHeight())); + DHLOGD("ColorConvert end, dstBuf Videoformat %d, width %d, height %d, alignedWidth %d, alignedHeight %d, " + + "ImgSize%d, timeUs %lld.", targetConfig.GetVideoformat(), sourceConfig.GetWidth(), 
sourceConfig.GetHeight(), + sourceConfig.GetWidth(), sourceConfig.GetHeight(), dstBuf->Size(), timeStamp); + return dstBuf; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/fpscontroller/fps_controller_process.cpp b/services/data_process_yuan/src/pipeline_node/fpscontroller/fps_controller_process.cpp new file mode 100644 index 00000000..daadcf5f --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/fpscontroller/fps_controller_process.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "fps_controller_process.h" + +#include "dcamera_utils_tools.h" +#include "distributed_camera_errno.h" +#include "distributed_hardware_log.h" + +namespace OHOS { +namespace DistributedHardware { +FpsControllerProcess::~FpsControllerProcess() +{ + if (isFpsControllerProcess_) { + DHLOGD("~DecodeDataProcess : ReleaseProcessNode."); + ReleaseProcessNode(); + } +} + +int32_t FpsControllerProcess::InitNode() +{ + if (targetConfig_.GetFrameRate() > MAX_TARGET_FRAME_RATE) { + DHLOGE("The target framerate : %d is greater than the max framerate : %d.", + targetConfig_.GetFrameRate(), MAX_TARGET_FRAME_RATE); + return DCAMERA_BAD_TYPE; + } + targetFrameRate_ = targetConfig_.GetFrameRate(); + isFpsControllerProcess_ = true; + return DCAMERA_OK; +} + +void FpsControllerProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : FPS controller.", nodeRank_); + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); + } + + isFpsControllerProcess_ = false; + isFirstFrame_ = false; + targetFrameRate_ = 0; + lastFrameIncomeTimeMs_ = 0; + recentFrameTimeSpanMs_ = -1; + keepCorrectionCount_ = 0; + keepLessThanDoubleCount_ = 0; + keepMoreThanDoubleCount_ = 0; + frameRateCorrectionFactor_ = 0.0; + frameRateOvershootMdf_ = 0; + for (int i = 0; i < INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE; i++) { + incomingFrameTimesMs_[i] = 0; + } +} + +int32_t FpsControllerProcess::ProcessData(std::vector>& inputBuffers) +{ + if (inputBuffers.empty()) { + DHLOGE("Data buffers is null."); + return DCAMERA_BAD_TYPE; + } + if (!isFpsControllerProcess_) { + DHLOGE("Decoder node occurred error."); + return DCAMERA_DISABLE_PROCESS; + } + int64_t timeStampUs = 0; + if (!inputBuffers[0]->FindInt64("timeUs", timeStampUs)) { + DHLOGE("Find decoder output timestamp failed."); + return DCAMERA_BAD_TYPE; + } + + std::lock_guard lck (mtx); + int64_t nowTimeMs = GetNowTimeStampMs(); + UpdateFPSControllerInfo(nowTimeMs); + + float curFrameRate = 
CalculateFrameRate(nowTimeMs); + if (IsDropFrame(curFrameRate)) { + DHLOGD("frame control, currect frameRate %u, targetRate %u, drop it", curFrameRate, targetFrameRate_); + return DCAMERA_OK; + } + + DHLOGD("frame control render PushVideoFrame, frame info width %d height %d, timeStampUs %lld, fps %d", + sourceConfig_.GetWidth(), sourceConfig_.GetHeight(), (long long)timeStampUs, curFrameRate); + return FpsControllerDone(inputBuffers); +} + +void FpsControllerProcess::UpdateFPSControllerInfo(int64_t nowMs) +{ + DHLOGD("Frame control, update control info."); + if (targetFrameRate_ <= 0) { + DHLOGD("Frame control, targetFrameRate_ : %d", targetFrameRate_); + return; + } + + isFirstFrame_ = false; + if (lastFrameIncomeTimeMs_ == 0) { + DHLOGD("Frame control, income fisrt frame."); + isFirstFrame_ = true; + } + lastFrameIncomeTimeMs_ = nowMs; + recentFrameTimeSpanMs_ = nowMs - lastFrameIncomeTimeMs_; + DHLOGD("Frame control, lastFrameIncomeTimeMs_ %lld, receive Frame after last frame(ms): %lld", + (long long)lastFrameIncomeTimeMs_, (long long)recentFrameTimeSpanMs_); + UpdateIncomingFrameTimes(nowMs); + UpdateFrameRateCorrectionFactor(nowMs); + return; +} + +void FpsControllerProcess::UpdateFrameRateCorrectionFactor(int64_t nowMs) +{ + DHLOGD("Frame control, update FPS correction factor."); + if (targetFrameRate_ <= 0) { + DHLOGD("Frame control, targetFrameRate_ : %d", targetFrameRate_); + return; + } + if (isFirstFrame_) { + DHLOGD("No frame rate correction factor when the first frame."); + return; + } + + const float minDropFrmValue = 0.5; + const float maxDropFrmValue = 1.0; + const float msPerSecond = 1000; + const float maxInstantaneousFrameRateCoefficient = 1.1; + float maxInstantaneousFrameRateThreshold = targetFrameRate_ * maxInstantaneousFrameRateCoefficient; + float instantaneousFrameRate = msPerSecond / recentFrameTimeSpanMs_; + if (instantaneousFrameRate < 0) { + instantaneousFrameRate = -instantaneousFrameRate; + } + if (instantaneousFrameRate <= 
maxInstantaneousFrameRateThreshold) { + frameRateCorrectionFactor_ = minDropFrmValue; + } else { + if (keepCorrectionCount_ >= VIDEO_FRAME_DROP_INTERVAL) { + frameRateCorrectionFactor_ = maxDropFrmValue; + keepCorrectionCount_ = 0; + } else { + frameRateCorrectionFactor_ = 0; + keepCorrectionCount_++; + } + DHLOGD("Frame control, instantaneousFrameRate %.3f is more than maxInstantaneousFrameRateThreshold %.3f, " + + "keepCorrectionCount %d", instantaneousFrameRate, maxInstantaneousFrameRateThreshold, + keepCorrectionCount_); + } + + DHLOGD("Frame control, targetFramerate %d, maxInstantaneousFrameRateThreshold %.3f," + + "instantaneousFrameRate %.3f, frameRateCorrectionFactor %.3f", targetFrameRate_, + maxInstantaneousFrameRateThreshold, instantaneousFrameRate, frameRateCorrectionFactor_); + return; +} + +void FpsControllerProcess::UpdateIncomingFrameTimes(int64_t nowMs) +{ + DHLOGD("Frame control, update incoming frame times array."); + if (targetFrameRate_ <= 0) { + DHLOGD("Frame control, targetFrameRate_ : %d", targetFrameRate_); + return; + } + if (isFirstFrame_) { + incomingFrameTimesMs_[0] = nowMs; + return; + } + + int64_t intervalNewAndFirst = nowMs - incomingFrameTimesMs_[0]; + if (intervalNewAndFirst < 0) { + intervalNewAndFirst = -intervalNewAndFirst; + } + if (intervalNewAndFirst > FRMAE_MAX_INTERVAL_TIME_WINDOW_MS) { + DHLOGD("frame control, nowMs: %lld mIncomingFrameT[0]: %lld intervalNewAndFirst: %lld", + (long long)nowMs, (long long)incomingFrameTimesMs_[0], (long long)intervalNewAndFirst); + for (int i = 0; i < INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE; i++) { + incomingFrameTimesMs_[i] = 0; + } + } else { + DHLOGD("frame control shift, nowMs: %lld mIncomingFrameT[0]: %lld intervalNewAndFirst: %lld", + (long long)nowMs, (long long)incomingFrameTimesMs_[0], (long long)intervalNewAndFirst); + const int32_t windowLeftNum = 2; + for (int i = (INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE - windowLeftNum); i >= 0; --i) { + incomingFrameTimesMs_[i + 1] = 
incomingFrameTimesMs_[i]; + } + } + incomingFrameTimesMs_[0] = nowMs; + return; +} + +float FpsControllerProcess::CalculateFrameRate(int64_t nowMs) +{ + DHLOGD("Frame control, calculate frame rate."); + if (targetFrameRate_ <= 0) { + DHLOGE("Frame control, targetFrameRate_ : %d", targetFrameRate_); + return 0.0; + } + + int32_t num = 0; + int32_t validFramesNumber = 0; + if (nowMs < 0) { + nowMs = -nowMs; + } + for (; num < INCOME_FRAME_TIME_HISTORY_WINDOWS_SIZE; num++) { + if (incomingFrameTimesMs_[num] <= 0 || nowMs - incomingFrameTimesMs_[num] > FRAME_HISTORY_TIME_WINDOWS_MS) { + break; + } else { + validFramesNumber++; + } + } + + const float msPerSecond = 1000; + const int32_t minValidCalculatedFrameRatesNum = 2; + int32_t minIncomingFrameNum = static_cast(targetFrameRate_) / MIN_INCOME_FRAME_NUM_COEFFICIENT; + if (validFramesNumber > minIncomingFrameNum && validFramesNumber > minValidCalculatedFrameRatesNum) { + int64_t validTotalTimeDifference = (nowMs - incomingFrameTimesMs_[num - 1]); + if (validTotalTimeDifference < 0) { + validTotalTimeDifference = -validTotalTimeDifference; + } + if (validTotalTimeDifference > 0) { + return validFramesNumber * msPerSecond / validTotalTimeDifference + frameRateCorrectionFactor_; + } + } + return static_cast(validFramesNumber); +} + +bool FpsControllerProcess::IsDropFrame(float incomingFps) +{ + DHLOGD("Frame control, IsDropFrame"); + if (targetFrameRate_ == 0) { + DHLOGD("target fps is 0, drop all frame."); + return true; + } + if (incomingFps <= 0) { + DHLOGD("incoming fps not more than 0, not drop"); + return false; + } + const int32_t incomingFrmRate = static_cast(incomingFps); + if (incomingFrmRate > static_cast(targetFrameRate_)) { + DHLOGD("incoming fps not more than targetFrameRate_, not drop"); + return false; + } + bool isDrop = ReduceFrameRateByUniformStrategy(incomingFrmRate); + DHLOGD("drop frame result: %s", isDrop ? 
"drop" : "no drop"); + return isDrop; +} + +bool FpsControllerProcess::ReduceFrameRateByUniformStrategy(int32_t incomingFrmRate) +{ + DHLOGD("Frame control, reduce frame rate by uniform rate strategy"); + if (incomingFrmRate > static_cast(targetFrameRate_)) { + DHLOGD("incoming fps not more than targetFrameRate_, not drop"); + return false; + } + + /* + * When the actual incoming frame rate correction value is greater than the target frame + * rate, the incoming frames are reduced uniformly. + */ + bool isDrop = false; + int32_t overshoot = frameRateOvershootMdf_ + (incomingFrmRate - targetFrameRate_); + if (overshoot < 0) { + overshoot = 0; + frameRateOvershootMdf_ = 0; + } + if (overshoot && DOUBLE_MULTIPLE * overshoot < incomingFrmRate) { + /* + * When the actual input frame rate is less than or equal to twice the target frame rate, + * one frame is dropped every (incomingFrmRate / overshoot) frames. + */ + if (keepMoreThanDoubleCount_) { + keepMoreThanDoubleCount_ = 0; + return true; + } + const int32_t dropVar = incomingFrmRate / overshoot; + if (keepLessThanDoubleCount_ >= dropVar) { + isDrop = true; + frameRateOvershootMdf_ = -(incomingFrmRate % overshoot) / OVERSHOOT_MODIFY_COEFFICIENT; + keepLessThanDoubleCount_ = 1; + } else { + keepLessThanDoubleCount_++; + } + } else { + /* + * When the actual frame rate is more than twice the target frame rate or the overshoot is + * equal to 0, one frame is reserved every (overshoot / targetFrameRate_) frames. 
+ */ + keepLessThanDoubleCount_ = 0; + const int32_t dropVar = overshoot / targetFrameRate_; + if (keepMoreThanDoubleCount_ < dropVar) { + isDrop = true; + keepMoreThanDoubleCount_++; + } else { + frameRateOvershootMdf_ = overshoot % static_cast(targetFrameRate_); + isDrop = false; + keepMoreThanDoubleCount_ = 0; + } + } + return isDrop; +} + +int32_t FpsControllerProcess::FpsControllerDone(std::vector> outputBuffers) +{ + if (outputBuffers.empty()) { + DHLOGE("The received data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + + if (nextDataProcess_ != nullptr) { + DHLOGD("Send to the next node of the FpsController for processing."); + int32_t err = nextDataProcess_->ProcessData(outputBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Someone node after the FpsController processes failed."); + } + return err; + } + DHLOGD("The current node is the last node, and Output the processed video buffer"); + std::shared_ptr targetPipelineSource = callbackPipelineSource_.lock(); + if (targetPipelineSource == nullptr) { + DHLOGE("callbackPipelineSource_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + targetPipelineSource->OnProcessedVideoBuffer(outputBuffers[0]); + return DCAMERA_OK; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process.cpp new file mode 100644 index 00000000..da8a0af7 --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process.cpp @@ -0,0 +1,712 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "decode_data_process.h" + +#include "distributed_hardware_log.h" +#include "graphic_common_c.h" + +#include "convert_nv12_to_nv21.h" +#include "dcamera_utils_tools.h" +#include "decode_video_callback.h" + +namespace OHOS { +namespace DistributedHardware { +DecodeDataProcess::~DecodeDataProcess() +{ + if (isDecoderProcess_) { + DHLOGD("~DecodeDataProcess : ReleaseProcessNode."); + ReleaseProcessNode(); + } +} + +int32_t DecodeDataProcess::InitNode() +{ + DHLOGD("Init DCamera DecodeNode start."); + if (!(IsInDecoderRange(sourceConfig_) && IsInDecoderRange(targetConfig_))) { + DHLOGE("Source config or target config are invalid."); + return DCAMERA_BAD_VALUE; + } + if (!IsConvertible(sourceConfig_, targetConfig_)) { + DHLOGE("The DecodeNode can't convert %d to %d.", sourceConfig_.GetVideoCodecType(), + targetConfig_.GetVideoCodecType()); + return DCAMERA_BAD_TYPE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("Disable DecodeNode. 
The target video codec type %d is the same as the source video codec type %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_OK; + } + + InitCodecEvent(); + int32_t err = InitDecoder(); + if (err != DCAMERA_OK) { + DHLOGE("Init video decoder failed."); + ReleaseProcessNode(); + return err; + } + alignedHeight_ = GetAlignedHeight(); + isDecoderProcess_ = true; + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::GetAlignedHeight() +{ + int32_t alignedBits = 32; + int32_t alignedHeight = static_cast(sourceConfig_.GetHeight()); + if (alignedHeight % alignedBits != 0) { + alignedHeight = ((alignedHeight / alignedBits) + 1) * alignedBits; + } + return alignedHeight; +} + +bool DecodeDataProcess::IsInDecoderRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH && curConfig.GetWidth() <= MAX_VIDEO_WIDTH && + curConfig.GetHeight() >= MIN_VIDEO_HEIGHT && curConfig.GetHeight() <= MAX_VIDEO_HEIGHT && + curConfig.GetFrameRate() <= MAX_FRAME_RATE); +} + +bool DecodeDataProcess::IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + return (sourceConfig.GetVideoCodecType() == targetConfig.GetVideoCodecType() || + targetConfig.GetVideoCodecType() == VideoCodecType::NO_CODEC); +} + +void DecodeDataProcess::InitCodecEvent() +{ + DHLOGD("Init DecodeNode eventBus, and add handler for it."); + eventBusDecode_ = std::make_shared(); + DCameraCodecEvent codecEvent(*this, std::make_shared()); + eventBusRegHandleDecode_ = eventBusDecode_->AddHandler(codecEvent.GetType(), *this); + + DHLOGD("Add handler for DCamera pipeline eventBus."); + eventBusRegHandlePipeline2Decode_ = eventBusPipeline_->AddHandler(codecEvent.GetType(), *this); +} + +int32_t DecodeDataProcess::InitDecoder() +{ + DHLOGD("Init video decoder."); + int32_t err = InitDecoderMetadataFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Init video decoder metadata format failed."); + return err; + } + 
videoDecoder_ = Media::VideoDecoderFactory::CreateByMime(processType_); + if (videoDecoder_ == nullptr) { + DHLOGE("Create video decoder failed."); + return DCAMERA_INIT_ERR; + } + decodeVideoCallback_ = std::make_shared(shared_from_this()); + int32_t retVal = videoDecoder_->SetCallback(decodeVideoCallback_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video decoder callback failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoDecoder_->Configure(metadataFormat_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video decoder metadata format failed."); + return DCAMERA_INIT_ERR; + } + retVal = SetDecoderOutputSurface(); + if (retVal != DCAMERA_OK) { + DHLOGE("Set decoder output surface failed."); + return retVal; + } + + retVal = videoDecoder_->Prepare(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video decoder prepare failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoDecoder_->Start(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video decoder start failed."); + return DCAMERA_INIT_ERR; + } + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::InitDecoderMetadataFormat() +{ + DHLOGD("Init video decoder metadata format."); + switch (sourceConfig_.GetVideoCodecType()) { + case VideoCodecType::CODEC_H264: + processType_ = "video/avc"; + metadataFormat_.PutStringValue("codec_mime", processType_); + break; + case VideoCodecType::CODEC_H265: + processType_ = "video/hevc"; + metadataFormat_.PutStringValue("codec_mime", processType_); + break; + default: + DHLOGE("The current codec type does not support decoding."); + return DCAMERA_NOT_FOUND; + } + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::NV12); + metadataFormat_.PutIntValue("max_input_size", MAX_YUV420_BUFFER_SIZE); + metadataFormat_.PutIntValue("width", (int32_t)sourceConfig_.GetWidth()); + metadataFormat_.PutIntValue("height", (int32_t)sourceConfig_.GetHeight()); + 
metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::SetDecoderOutputSurface() +{ + DHLOGD("Set the video decoder output surface."); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder is null."); + return DCAMERA_BAD_VALUE; + } + + decodeConsumerSurface_ = Surface::CreateSurfaceAsConsumer(); + if (decodeConsumerSurface_ == nullptr) { + DHLOGE("Create the decode consumer surface failed."); + return DCAMERA_INIT_ERR; + } + decodeConsumerSurface_->SetDefaultWidthAndHeight((int32_t)sourceConfig_.GetWidth(), + (int32_t)sourceConfig_.GetHeight()); + decodeSurfaceListener_ = new DecodeSurfaceListener(decodeConsumerSurface_, shared_from_this()); + if (decodeConsumerSurface_->RegisterConsumerListener(decodeSurfaceListener_) != + SURFACE_ERROR_OK) { + DHLOGE("Register consumer listener failed."); + return DCAMERA_INIT_ERR; + } + + sptr surfaceProducer = decodeConsumerSurface_->GetProducer(); + if (surfaceProducer == nullptr) { + DHLOGE("Get the surface producer of the decode consumer surface failed."); + return DCAMERA_INIT_ERR; + } + decodeProducerSurface_ = Surface::CreateSurfaceAsProducer(surfaceProducer); + if (decodeProducerSurface_ == nullptr) { + DHLOGE("Create the decode producer surface of the decode consumer surface failed."); + return DCAMERA_INIT_ERR; + } + + DHLOGD("Set the producer surface to video decoder output surface."); + int32_t err = videoDecoder_->SetOutputSurface(decodeProducerSurface_); + if (err != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set decoder output surface failed."); + return DCAMERA_INIT_ERR; + } + return DCAMERA_OK; +} + +void DecodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); + isDecoderProcess_ = false; + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); + } + if (eventBusDecode_ != nullptr && eventBusPipeline_ != nullptr) { + DHLOGD("Start release DecodeNode eventBusDecode_ and 
eventBusPipeline_."); + DCameraCodecEvent codecEvent(*this, std::make_shared()); + eventBusDecode_->RemoveHandler(codecEvent.GetType(), eventBusRegHandleDecode_); + eventBusDecode_ = nullptr; + eventBusPipeline_->RemoveHandler(codecEvent.GetType(), eventBusRegHandlePipeline2Decode_); + eventBusPipeline_ = nullptr; + } + + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ != nullptr) { + DHLOGD("Start release videoDecoder."); + videoDecoder_->Flush(); + videoDecoder_->Stop(); + videoDecoder_->Release(); + videoDecoder_ = nullptr; + decodeVideoCallback_ = nullptr; + } + } + if (decodeConsumerSurface_ != nullptr) { + int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Unregister consumer listener failed. Error type: %d.", ret); + } + decodeConsumerSurface_ = nullptr; + decodeProducerSurface_ = nullptr; + decodeSurfaceListener_ = nullptr; + } + + processType_ = ""; + std::queue> emptyBuffersQueue; + inputBuffersQueue_.swap(emptyBuffersQueue); + std::queue emptyIndexsQueue; + availableInputIndexsQueue_.swap(emptyIndexsQueue); + waitDecoderOutputCount_ = 0; + lastFeedDecoderInputBufferTimeUs_ = 0; + outputTimeStampUs_ = 0; + alignedHeight_ = 0; + DHLOGD("Release [%d] node : DecodeNode end.", nodeRank_); +} + +int32_t DecodeDataProcess::ProcessData(std::vector>& inputBuffers) +{ + DHLOGD("Process data in DecodeDataProcess."); + if (inputBuffers.empty()) { + DHLOGE("The input data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("The target VideoCodecType : %d is the same as the source VideoCodecType : %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DecodeDone(inputBuffers); + } + + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before decoding data."); + return DCAMERA_INIT_ERR; + } + if (inputBuffersQueue_.size() > VIDEO_DECODER_QUEUE_MAX) { 
+ DHLOGE("video decoder input buffers queue over flow."); + return DCAMERA_INDEX_OVERFLOW; + } + if (inputBuffers[0]->Size() > MAX_YUV420_BUFFER_SIZE) { + DHLOGE("DecodeNode input buffer size %zu error.", inputBuffers[0]->Size()); + return DCAMERA_MEMORY_OPT_ERROR; + } + if (!isDecoderProcess_) { + DHLOGE("Decoder node occurred error or start release."); + return DCAMERA_DISABLE_PROCESS; + } + inputBuffersQueue_.push(inputBuffers[0]); + DHLOGD("Push inputBuf sucess. BufSize %zu, QueueSize %zu.", inputBuffers[0]->Size(), inputBuffersQueue_.size()); + int32_t err = FeedDecoderInputBuffer(); + if (err != DCAMERA_OK) { + int32_t sleepTimeUs = 5000; + std::this_thread::sleep_for(std::chrono::microseconds(sleepTimeUs)); + DHLOGD("Feed decoder input buffer failed. Try FeedDecoderInputBuffer again."); + std::shared_ptr reFeedInputPacket = std::make_shared(); + reFeedInputPacket->SetVideoCodecType(sourceConfig_.GetVideoCodecType()); + DCameraCodecEvent dCamCodecEv(*this, reFeedInputPacket, VideoCodecAction::ACTION_ONCE_AGAIN); + if (eventBusPipeline_ == nullptr) { + DHLOGE("eventBusPipeline_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + eventBusPipeline_->PostEvent(dCamCodecEv, POSTMODE::POST_ASYNC); + } + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::FeedDecoderInputBuffer() +{ + DHLOGD("Feed decoder input buffer."); + while ((!inputBuffersQueue_.empty()) && (isDecoderProcess_)) { + std::shared_ptr buffer = inputBuffersQueue_.front(); + if (buffer == nullptr || availableInputIndexsQueue_.empty()) { + DHLOGE("inputBuffersQueue size %zu, availableInputIndexsQueue size %zu.", + inputBuffersQueue_.size(), availableInputIndexsQueue_.size()); + return DCAMERA_BAD_VALUE; + } + + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before GetInputBuffer."); + return DCAMERA_OK; + } + uint32_t index = availableInputIndexsQueue_.front(); + std::shared_ptr sharedMemoryInput = 
videoDecoder_->GetInputBuffer(index); + if (sharedMemoryInput == nullptr) { + DHLOGE("Failed to obtain the input shared memory corresponding to the [%u] index.", index); + return DCAMERA_BAD_VALUE; + } + size_t inputMemoDataSize = static_cast(sharedMemoryInput->GetSize()); + errno_t err = memcpy_s(sharedMemoryInput->GetBase(), inputMemoDataSize, buffer->Data(), buffer->Size()); + if (err != EOK) { + DHLOGE("memcpy_s buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + int64_t timeUs = GetDecoderTimeStamp(); + DHLOGD("Decoder input buffer size %zu, timeStamp %lld.", buffer->Size(), (long long)timeUs); + Media::AVCodecBufferInfo bufferInfo {timeUs, static_cast(buffer->Size()), 0}; + int32_t ret = videoDecoder_->QueueInputBuffer(index, bufferInfo, + Media::AVCODEC_BUFFER_FLAG_NONE); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("queue Input buffer failed."); + return DCAMERA_BAD_OPERATE; + } + } + + inputBuffersQueue_.pop(); + DHLOGD("Push inputBuffer sucess. inputBuffersQueue size is %d.", inputBuffersQueue_.size()); + + { + std::lock_guard lck(mtxHoldCount_); + availableInputIndexsQueue_.pop(); + waitDecoderOutputCount_++; + DHLOGD("Wait decoder output frames number is %d.", waitDecoderOutputCount_); + } + } + return DCAMERA_OK; +} + +int64_t DecodeDataProcess::GetDecoderTimeStamp() +{ + int64_t TimeDifferenceStampUs = 0; + int64_t nowTimeUs = GetNowTimeStampUs(); + if (lastFeedDecoderInputBufferTimeUs_ == 0) { + lastFeedDecoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; + } + TimeDifferenceStampUs = nowTimeUs - lastFeedDecoderInputBufferTimeUs_; + lastFeedDecoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; +} + +void DecodeDataProcess::GetDecoderOutputBuffer(const sptr& surface) +{ + DHLOGD("Get decoder output buffer."); + if (surface == nullptr) { + DHLOGE("Get decode consumer surface failed."); + return; + } + Rect damage = {0, 0, 0, 0}; + int32_t acquireFence = 0; + int64_t timeStampUs = 0; + sptr 
surfaceBuffer = nullptr; + GSError ret = surface->AcquireBuffer(surfaceBuffer, acquireFence, timeStampUs, damage); + if (ret != GSERROR_OK || surfaceBuffer == nullptr) { + DHLOGE("Acquire surface buffer failed!"); + return; + } + int32_t alignedWidth = surfaceBuffer->GetStride(); + int32_t alignedHeight = alignedHeight_; + DHLOGD("OutputBuffer alignedWidth %d, alignedHeight %d, TimeUs %lld.", alignedWidth, alignedHeight, timeStampUs); + CopyDecodedImage(surfaceBuffer, timeStampUs, alignedWidth, alignedHeight); + surface->ReleaseBuffer(surfaceBuffer, -1); + outputTimeStampUs_ = timeStampUs; + { + std::lock_guard lck(mtxHoldCount_); + if (waitDecoderOutputCount_ <= 0) { + DHLOGE("The waitDecoderOutputCount_ = %d.", waitDecoderOutputCount_); + } + if (outputTimeStampUs_ == 0) { + waitDecoderOutputCount_ -= FIRST_FRAME_INPUT_NUM; + } else { + waitDecoderOutputCount_--; + } + DHLOGD("Wait decoder output frames number is %d.", waitDecoderOutputCount_); + } +} + +void DecodeDataProcess::CopyDecodedImage(const sptr& surBuf, int64_t timeStampUs, int32_t alignedWidth, + int32_t alignedHeight) +{ + if (surBuf == nullptr) { + DHLOGE("surface buffer is null!"); + return; + } + int32_t y2UvRatio = 2; + int32_t bytesPerPixel = 3; + size_t validDecodedImageAlignedSize = static_cast(alignedWidth * alignedHeight * + bytesPerPixel / y2UvRatio); + size_t validDecodedImageSize = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight() * + bytesPerPixel / y2UvRatio); + size_t surfaceBufSize = static_cast(surBuf->GetSize()); + if (validDecodedImageAlignedSize > surfaceBufSize || validDecodedImageAlignedSize < validDecodedImageSize) { + DHLOGE("Buffer size error, validDecodedImageSize %zu, validDecodedImageAlignedSize %zu, surBufSize %zu.", + validDecodedImageSize, validDecodedImageAlignedSize, surBuf->GetSize()); + return; + } + std::shared_ptr bufferOutput = std::make_shared(validDecodedImageSize); + uint8_t *addr = static_cast(surBuf->GetVirAddr()); + if (alignedWidth == 
static_cast(sourceConfig_.GetWidth()) && + alignedHeight == static_cast(sourceConfig_.GetHeight())) { + errno_t err = memcpy_s(bufferOutput->Data(), bufferOutput->Size(), addr, validDecodedImageSize); + if (err != EOK) { + DHLOGE("memcpy_s surface buffer failed."); + return; + } + } else { + ImageUnitInfo srcImgInfo = { sourceConfig_.GetVideoformat(), static_cast(sourceConfig_.GetWidth()), + static_cast(sourceConfig_.GetHeight()), alignedWidth, alignedHeight, + static_cast(alignedWidth * alignedHeight), surfaceBufSize, addr }; + ImageUnitInfo dstImgInfo = { sourceConfig_.GetVideoformat(), static_cast(sourceConfig_.GetWidth()), + static_cast(sourceConfig_.GetHeight()), static_cast(sourceConfig_.GetWidth()), + static_cast(sourceConfig_.GetHeight()), sourceConfig_.GetWidth() * sourceConfig_.GetHeight(), + bufferOutput->Size(), bufferOutput->Data() }; + int32_t retRow = CopyYUVPlaneByRow(srcImgInfo, dstImgInfo); + if (retRow != DCAMERA_OK) { + DHLOGE("memcpy_s surface buffer failed."); + return; + } + } + bufferOutput->SetInt64("timeUs", timeStampUs); + bufferOutput->SetInt32("Videoformat", static_cast(sourceConfig_.GetVideoformat())); + bufferOutput->SetInt32("alignedWidth", static_cast(sourceConfig_.GetWidth())); + bufferOutput->SetInt32("alignedHeight", static_cast(sourceConfig_.GetHeight())); + bufferOutput->SetInt32("width", static_cast(sourceConfig_.GetWidth())); + bufferOutput->SetInt32("height", static_cast(sourceConfig_.GetHeight())); + PostOutputDataBuffers(bufferOutput); +} + +int32_t DecodeDataProcess::CopyYUVPlaneByRow(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t ret = CheckCopyImageInfo(srcImgInfo, dstImgInfo); + if (ret != DCAMERA_OK) { + DHLOGE("Check CopyImageUnitInfo failed."); + return ret; + } + errno_t err = EOK; + int32_t srcDataOffset = 0; + int32_t dstDataOffset = 0; + for (int32_t yh = 0; yh < dstImgInfo.height; yh++) { + err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.chromaOffset - 
dstDataOffset, + srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); + if (err != EOK) { + DHLOGE("memcpy_s YPlane in line[%d] failed.", yh); + return DCAMERA_MEMORY_OPT_ERROR; + } + dstDataOffset += dstImgInfo.alignedWidth; + srcDataOffset += srcImgInfo.alignedWidth; + } + DHLOGD("Copy Yplane end, dstDataOffset %d, srcDataOffset %d, validYPlaneSize %d.", + dstDataOffset, srcDataOffset, dstImgInfo.chromaOffset); + + int32_t y2UvRatio = 2; + dstDataOffset = dstImgInfo.chromaOffset; + srcDataOffset = srcImgInfo.chromaOffset; + for (int32_t uvh = 0; uvh < dstImgInfo.height / y2UvRatio; uvh++) { + err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.imgSize - dstDataOffset, + srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); + if (err != EOK) { + DHLOGE("memcpy_s UVPlane in line[%d] failed.", uvh); + return DCAMERA_MEMORY_OPT_ERROR; + } + dstDataOffset += dstImgInfo.alignedWidth; + srcDataOffset += srcImgInfo.alignedWidth; + } + DHLOGD("Copy UVplane end, dstDataOffset %d, srcDataOffset %d.", dstDataOffset, srcDataOffset); + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::CheckCopyImageInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + if (srcImgInfo.imgData == nullptr || dstImgInfo.imgData == nullptr) { + DHLOGE("The imgData of srcImgInfo or the imgData of dstImgInfo are null!"); + return DCAMERA_BAD_VALUE; + } + if (srcImgInfo.colorFormat != dstImgInfo.colorFormat) { + DHLOGE("CopyInfo error : srcImgInfo colorFormat %d, dstImgInfo colorFormat %d.", + srcImgInfo.colorFormat, dstImgInfo.colorFormat); + return DCAMERA_BAD_VALUE; + } + + if (!IsCorrectImageUnitInfo(srcImgInfo)) { + DHLOGE("srcImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + + "imgSize %lld.", srcImgInfo.width, srcImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight, + srcImgInfo.chromaOffset, srcImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + if (!IsCorrectImageUnitInfo(dstImgInfo)) { + 
DHLOGE("dstImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + + "imgSize %lld.", dstImgInfo.width, dstImgInfo.height, dstImgInfo.alignedWidth, dstImgInfo.alignedHeight, + dstImgInfo.chromaOffset, dstImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + + if (dstImgInfo.width > srcImgInfo.alignedWidth || dstImgInfo.height > srcImgInfo.alignedHeight) { + DHLOGE("Comparison ImgInfo fail: dstwidth %d, dstheight %d, srcAlignedWidth %d, srcAlignedHeight %d.", + dstImgInfo.width, dstImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight); + return DCAMERA_BAD_VALUE; + } + return DCAMERA_OK; +} + +bool DecodeDataProcess::IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo) +{ + int32_t y2UvRatio = 2; + int32_t bytesPerPixel = 3; + size_t expectedImgSize = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight * + bytesPerPixel / y2UvRatio); + size_t expectedChromaOffset = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight); + return (imgInfo.width <= imgInfo.alignedWidth && imgInfo.height <= imgInfo.alignedHeight && + imgInfo.imgSize >= expectedImgSize && imgInfo.chromaOffset == expectedChromaOffset); +} + +void DecodeDataProcess::PostOutputDataBuffers(std::shared_ptr& outputBuffer) +{ + if (eventBusDecode_ == nullptr || outputBuffer == nullptr) { + DHLOGE("eventBusDecode_ or outputBuffer is null."); + return; + } + std::vector> multiDataBuffers; + multiDataBuffers.push_back(outputBuffer); + std::shared_ptr transNextNodePacket = std::make_shared(VideoCodecType::NO_CODEC, + multiDataBuffers); + DCameraCodecEvent dCamCodecEv(*this, transNextNodePacket, VideoCodecAction::NO_ACTION); + eventBusDecode_->PostEvent(dCamCodecEv, POSTMODE::POST_ASYNC); + DHLOGD("Send video decoder output asynchronous DCameraCodecEvents success."); +} + +int32_t DecodeDataProcess::DecodeDone(std::vector> outputBuffers) +{ + DHLOGD("Decoder Done."); + if (outputBuffers.empty()) { + DHLOGE("The received data buffers is empty."); + return 
DCAMERA_BAD_VALUE; + } + + if (nextDataProcess_ != nullptr) { + DHLOGD("Send to the next node of the decoder for processing."); + int32_t err = nextDataProcess_->ProcessData(outputBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Someone node after the decoder processes failed."); + } + return err; + } + DHLOGD("The current node is the last node, and Output the processed video buffer"); + std::shared_ptr targetPipelineSource = callbackPipelineSource_.lock(); + if (targetPipelineSource == nullptr) { + DHLOGE("callbackPipelineSource_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + targetPipelineSource->OnProcessedVideoBuffer(outputBuffers[0]); + return DCAMERA_OK; +} + +void DecodeDataProcess::OnEvent(DCameraCodecEvent& ev) +{ + DHLOGD("Receiving asynchronous DCameraCodecEvents."); + std::shared_ptr receivedCodecPacket = ev.GetCodecPacket(); + VideoCodecAction action = ev.GetAction(); + switch (action) { + case VideoCodecAction::NO_ACTION: { + if (receivedCodecPacket == nullptr) { + DHLOGE("the received codecPacket of action [%d] is null.", action); + OnError(); + return; + } + + std::shared_ptr colorConverter = std::make_shared(); + VideoConfigParams decodedConfig(VideoCodecType::NO_CODEC, Videoformat::NV12, sourceConfig_.GetFrameRate(), + sourceConfig_.GetWidth(), sourceConfig_.GetHeight()); + std::vector> nv21DataBuffers; + std::shared_ptr nv21Image = colorConverter->ProcessData( + receivedCodecPacket->GetDataBuffers()[0], decodedConfig, targetConfig_); + nv21DataBuffers.push_back(nv21Image); + + DecodeDone(nv21DataBuffers); + break; + } + case VideoCodecAction::ACTION_ONCE_AGAIN: + DHLOGD("Try FeedDecoderInputBuffer again."); + FeedDecoderInputBuffer(); + return; + default: + DHLOGD("The action : %d is not supported.", action); + return; + } +} + +void DecodeDataProcess::OnError() +{ + DHLOGD("DecodeDataProcess : OnError."); + isDecoderProcess_ = false; + videoDecoder_->Stop(); + std::shared_ptr targetPipelineSource = callbackPipelineSource_.lock(); + if 
(targetPipelineSource == nullptr) { + DHLOGE("callbackPipelineSource_ is nullptr."); + return; + } + targetPipelineSource->OnError(DataProcessErrorType::ERROR_PIPELINE_DECODER); +} + +void DecodeDataProcess::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("DecodeDataProcess::OnInputBufferAvailable"); + std::lock_guard lck(mtxHoldCount_); + if (availableInputIndexsQueue_.size() > VIDEO_DECODER_QUEUE_MAX) { + DHLOGE("Video decoder available indexs queue overflow."); + return; + } + DHLOGD("Video decoder available indexs queue push index [%u].", index); + availableInputIndexsQueue_.push(index); +} + +void DecodeDataProcess::OnOutputFormatChanged(const Media::Format &format) +{ + if (decodeOutputFormat_.GetFormatMap().empty()) { + DHLOGE("The first changed video decoder output format is null."); + return; + } + decodeOutputFormat_ = format; +} + +void DecodeDataProcess::OnOutputBufferAvailable(uint32_t index, const Media::AVCodecBufferInfo& info, + const Media::AVCodecBufferFlag& flag) +{ + if (!isDecoderProcess_) { + DHLOGE("Decoder node occurred error or start release."); + return; + } + DHLOGD("Video decode buffer info: presentation TimeUs %lld, size %d, offset %d, flag %d", + info.presentationTimeUs, info.size, info.offset, flag); + outputInfo_ = info; + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before decoding data."); + return; + } + int32_t errRelease = videoDecoder_->ReleaseOutputBuffer(index, true); + if (errRelease != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("The video decoder output decoded data to surface failed, index : [%u].", index); + } + } +} + +VideoConfigParams DecodeDataProcess::GetSourceConfig() const +{ + return sourceConfig_; +} + +VideoConfigParams DecodeDataProcess::GetTargetConfig() const +{ + return targetConfig_; +} + +void DecodeSurfaceListener::OnBufferAvailable() +{ + DHLOGD("DecodeSurfaceListener : OnBufferAvailable."); + std::shared_ptr 
targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->GetDecoderOutputBuffer(surface_); +} + +void DecodeSurfaceListener::SetSurface(const sptr& surface) +{ + surface_ = surface; +} + +void DecodeSurfaceListener::SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode) +{ + decodeVideoNode_ = decodeVideoNode; +} + +DecodeSurfaceListener::~DecodeSurfaceListener() +{ + DHLOGD("DecodeSurfaceListener : ~DecodeSurfaceListener."); + surface_ = nullptr; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp new file mode 100644 index 00000000..77de7980 --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_data_process_common.cpp @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "decode_data_process.h" + +#include "distributed_hardware_log.h" +#include "graphic_common_c.h" + +#include "convert_nv12_to_nv21.h" +#include "dcamera_utils_tools.h" +#include "decode_video_callback.h" + +namespace OHOS { +namespace DistributedHardware { +DecodeDataProcess::~DecodeDataProcess() +{ + if (isDecoderProcess_) { + DHLOGD("~DecodeDataProcess : ReleaseProcessNode."); + ReleaseProcessNode(); + } +} + +int32_t DecodeDataProcess::InitNode() +{ + DHLOGD("Common Init DCamera DecodeNode start."); + if (!(IsInDecoderRange(sourceConfig_) && IsInDecoderRange(targetConfig_))) { + DHLOGE("Common Source config or target config are invalid."); + return DCAMERA_BAD_VALUE; + } + if (!IsConvertible(sourceConfig_, targetConfig_)) { + DHLOGE("Common The DecodeNode can't convert %d to %d.", sourceConfig_.GetVideoCodecType(), + targetConfig_.GetVideoCodecType()); + return DCAMERA_BAD_TYPE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("Disable DecodeNode. 
The target video codec type %d is the same as the source video codec type %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_OK; + } + + InitCodecEvent(); + int32_t err = InitDecoder(); + if (err != DCAMERA_OK) { + DHLOGE("Common Init video decoder fail."); + ReleaseProcessNode(); + return err; + } + alignedHeight_ = GetAlignedHeight(); + isDecoderProcess_ = true; + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::GetAlignedHeight() +{ + int32_t alignedBits = 32; + int32_t alignedHeight = static_cast(sourceConfig_.GetHeight()); + if (alignedHeight % alignedBits != 0) { + alignedHeight = ((alignedHeight / alignedBits) + 1) * alignedBits; + } + return alignedHeight; +} + +bool DecodeDataProcess::IsInDecoderRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH || curConfig.GetWidth() <= MAX_VIDEO_WIDTH || + curConfig.GetHeight() >= MIN_VIDEO_HEIGHT || curConfig.GetHeight() <= MAX_VIDEO_HEIGHT || + curConfig.GetFrameRate() <= MAX_FRAME_RATE); +} + +bool DecodeDataProcess::IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + return (sourceConfig.GetVideoCodecType() == targetConfig.GetVideoCodecType() || + targetConfig.GetVideoCodecType() == VideoCodecType::NO_CODEC); +} + +void DecodeDataProcess::InitCodecEvent() +{ + DHLOGD("Common Init DecodeNode eventBus, and add handler for it."); + eventBusDecode_ = std::make_shared(); + DCameraCodecEvent codecEvent(*this, std::make_shared()); + eventBusRegHandleDecode_ = eventBusDecode_->AddHandler(codecEvent.GetType(), *this); + + DHLOGD("Common Add handler for DCamera pipeline eventBus."); + eventBusRegHandlePipeline2Decode_ = eventBusPipeline_->AddHandler(codecEvent.GetType(), *this); +} + +int32_t DecodeDataProcess::InitDecoder() +{ + DHLOGD("Common Init video decoder."); + int32_t err = InitDecoderMetadataFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Init video decoder metadata format fail."); 
+ return err; + } + + videoDecoder_ = Media::VideoDecoderFactory::CreateByMime(processType_); + if (videoDecoder_ == nullptr) { + DHLOGE("Create video decoder failed."); + return DCAMERA_INIT_ERR; + } + decodeVideoCallback_ = std::make_shared(shared_from_this()); + int32_t retVal = videoDecoder_->SetCallback(decodeVideoCallback_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video decoder callback failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoDecoder_->Configure(metadataFormat_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video decoder metadata format failed."); + return DCAMERA_INIT_ERR; + } + retVal = SetDecoderOutputSurface(); + if (retVal != DCAMERA_OK) { + DHLOGE("Set decoder output surface fail."); + return retVal; + } + + retVal = videoDecoder_->Prepare(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video decoder prepare failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoDecoder_->Start(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video decoder start failed."); + return DCAMERA_INIT_ERR; + } + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::InitDecoderMetadataFormat() +{ + DHLOGD("Common Init video decoder metadata format."); + processType_ = "video/mp4v-es"; + metadataFormat_.PutStringValue("codec_mime", processType_); + + int32_t width = (int32_t)sourceConfig_.GetWidth(); + int32_t height = (int32_t)sourceConfig_.GetHeight(); + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::RGBA); + metadataFormat_.PutIntValue("max_input_size", width * height * 4 * 2); + metadataFormat_.PutIntValue("width", width); + metadataFormat_.PutIntValue("height", height); + metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::SetDecoderOutputSurface() +{ + DHLOGD("Set the video decoder output surface."); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder is null."); + 
return DCAMERA_BAD_VALUE; + } + + decodeConsumerSurface_ = Surface::CreateSurfaceAsConsumer(); + if (decodeConsumerSurface_ == nullptr) { + DHLOGE("Creat the decode consumer surface fail."); + return DCAMERA_INIT_ERR; + } + decodeConsumerSurface_->SetDefaultWidthAndHeight((int32_t)sourceConfig_.GetWidth(), + (int32_t)sourceConfig_.GetHeight()); + decodeSurfaceListener_ = new DecodeSurfaceListener(decodeConsumerSurface_, shared_from_this()); + if (decodeConsumerSurface_->RegisterConsumerListener(decodeSurfaceListener_) != + SURFACE_ERROR_OK) { + DHLOGE("Register consumer listener fail."); + return DCAMERA_INIT_ERR; + } + + sptr surfaceProducer = decodeConsumerSurface_->GetProducer(); + if (surfaceProducer == nullptr) { + DHLOGE("Get the surface producer of the decode consumer surface fail."); + return DCAMERA_INIT_ERR; + } + decodeProducerSurface_ = Surface::CreateSurfaceAsProducer(surfaceProducer); + if (decodeProducerSurface_ == nullptr) { + DHLOGE("Creat the decode producer surface of the decode consumer surface fail."); + return DCAMERA_INIT_ERR; + } + + DHLOGD("Set the producer surface to video decoder output surface."); + int32_t err = videoDecoder_->SetOutputSurface(decodeProducerSurface_); + if (err != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set decoder output surface fail."); + return DCAMERA_INIT_ERR; + } + return DCAMERA_OK; +} + +void DecodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : DecodeNode.", nodeRank_); + isDecoderProcess_ = false; + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); + } + if (eventBusDecode_ != nullptr && eventBusPipeline_ != nullptr) { + DHLOGD("Start release DecodeNode eventBusDecode_ and eventBusPipeline_."); + DCameraCodecEvent codecEvent(*this, std::make_shared()); + eventBusDecode_->RemoveHandler(codecEvent.GetType(), eventBusRegHandleDecode_); + eventBusDecode_ = nullptr; + eventBusPipeline_->RemoveHandler(codecEvent.GetType(), 
eventBusRegHandlePipeline2Decode_); + eventBusPipeline_ = nullptr; + } + + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ != nullptr) { + DHLOGD("Start release videoDecoder."); + videoDecoder_->Flush(); + videoDecoder_->Stop(); + videoDecoder_->Release(); + videoDecoder_ = nullptr; + decodeVideoCallback_ = nullptr; + } + } + if (decodeConsumerSurface_ != nullptr) { + int32_t ret = decodeConsumerSurface_->UnregisterConsumerListener(); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Unregister consumer listener failed. Error type: %d.", ret); + } + decodeConsumerSurface_ = nullptr; + decodeProducerSurface_ = nullptr; + decodeSurfaceListener_ = nullptr; + } + + processType_ = ""; + std::queue> emptyBuffersQueue; + inputBuffersQueue_.swap(emptyBuffersQueue); + std::queue emptyIndexsQueue; + availableInputIndexsQueue_.swap(emptyIndexsQueue); + waitDecoderOutputCount_ = 0; + lastFeedDecoderInputBufferTimeUs_ = 0; + outputTimeStampUs_ = 0; + alignedHeight_ = 0; + DHLOGD("Release [%d] node : DecodeNode end.", nodeRank_); +} + +int32_t DecodeDataProcess::ProcessData(std::vector>& inputBuffers) +{ + DHLOGD("Process data in DecodeDataProcess."); + if (inputBuffers.empty()) { + DHLOGE("The input data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("The target VideoCodecType : %d is the same as the source VideoCodecType : %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DecodeDone(inputBuffers); + } + + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before decoding data."); + return DCAMERA_INIT_ERR; + } + if (inputBuffersQueue_.size() > VIDEO_DECODER_QUEUE_MAX) { + DHLOGE("video decoder input buffers queue over flow."); + return DCAMERA_INDEX_OVERFLOW; + } + int32_t bufferSize = 1920 * 1808 * 4 * 2; + if (inputBuffers[0]->Size() > bufferSize) { + DHLOGE("DecodeNode input buffer size %d error.", 
inputBuffers[0]->Size()); + return DCAMERA_MEMORY_OPT_ERROR; + } + if (!isDecoderProcess_) { + DHLOGE("Decoder node occurred error or start release."); + return DCAMERA_DISABLE_PROCESS; + } + inputBuffersQueue_.push(inputBuffers[0]); + DHLOGD("Push inputBuffer sucess. BufSize %d, QueueSize %d.", inputBuffers[0]->Size(), inputBuffersQueue_.size()); + int32_t err = FeedDecoderInputBuffer(); + if (err != DCAMERA_OK) { + int32_t sleepTimeUs = 5000; + std::this_thread::sleep_for(std::chrono::microseconds(sleepTimeUs)); + DHLOGD("Feed decoder input buffer fail. Try FeedDecoderInputBuffer again."); + std::shared_ptr reFeedInputPacket = std::make_shared(); + reFeedInputPacket->SetVideoCodecType(sourceConfig_.GetVideoCodecType()); + DCameraCodecEvent dCamCodecEv(*this, reFeedInputPacket, VideoCodecAction::ACTION_ONCE_AGAIN); + if (eventBusPipeline_ == nullptr) { + DHLOGE("eventBusPipeline_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + eventBusPipeline_->PostEvent(dCamCodecEv, POSTMODE::POST_ASYNC); + } + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::FeedDecoderInputBuffer() +{ + DHLOGD("Feed decoder input buffer."); + while ((!inputBuffersQueue_.empty()) && (isDecoderProcess_)) { + std::shared_ptr buffer = inputBuffersQueue_.front(); + if (buffer == nullptr || availableInputIndexsQueue_.empty()) { + DHLOGE("inputBuffersQueue size %d, availableInputIndexsQueue size %d.", + inputBuffersQueue_.size(), availableInputIndexsQueue_.size()); + return DCAMERA_BAD_VALUE; + } + + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before GetInputBuffer."); + return DCAMERA_OK; + } + uint32_t index = availableInputIndexsQueue_.front(); + std::shared_ptr sharedMemoryInput = videoDecoder_->GetInputBuffer(index); + if (sharedMemoryInput == nullptr) { + DHLOGE("Failed to obtain the input shared memory corresponding to the [%d] index.", index); + return DCAMERA_BAD_VALUE; + } + size_t inputMemoDataSize = 
static_cast(sharedMemoryInput->GetSize()); + errno_t err = memcpy_s(sharedMemoryInput->GetBase(), inputMemoDataSize, buffer->Data(), buffer->Size()); + if (err != EOK) { + DHLOGE("memcpy_s buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + int64_t timeUs = GetDecoderTimeStamp(); + DHLOGD("Decoder input buffer size %d, timeStamp %lld.", buffer->Size(), (long long)timeUs); + Media::AVCodecBufferInfo bufferInfo {timeUs, static_cast(buffer->Size()), 0}; + int32_t ret = videoDecoder_->QueueInputBuffer(index, bufferInfo, + Media::AVCODEC_BUFFER_FLAG_NONE); + if (ret != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("queue Input buffer failed."); + return DCAMERA_BAD_OPERATE; + } + } + + inputBuffersQueue_.pop(); + DHLOGD("Push inputBuffer sucess. inputBuffersQueue size is %d.", inputBuffersQueue_.size()); + + { + std::lock_guard lck(mtxHoldCount_); + availableInputIndexsQueue_.pop(); + waitDecoderOutputCount_++; + DHLOGD("Wait decoder output frames number is %d.", waitDecoderOutputCount_); + } + } + return DCAMERA_OK; +} + +int64_t DecodeDataProcess::GetDecoderTimeStamp() +{ + int64_t TimeDifferenceStampUs = 0; + int64_t nowTimeUs = GetNowTimeStampUs(); + if (lastFeedDecoderInputBufferTimeUs_ == 0) { + lastFeedDecoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; + } + TimeDifferenceStampUs = nowTimeUs - lastFeedDecoderInputBufferTimeUs_; + lastFeedDecoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; +} + +void DecodeDataProcess::GetDecoderOutputBuffer(const sptr& surface) +{ + DHLOGD("Get decoder output buffer."); + if (surface == nullptr) { + DHLOGE("Get decode consumer surface failed."); + return; + } + Rect damage = {0, 0, 0, 0}; + int32_t acquireFence = 0; + int64_t timeStampUs = 0; + sptr surfaceBuffer = nullptr; + GSError ret = surface->AcquireBuffer(surfaceBuffer, acquireFence, timeStampUs, damage); + if (ret != GSERROR_OK || surfaceBuffer == nullptr) { + DHLOGE("Acquire surface buffer failed!"); + return; + } + 
int32_t alignedWidth = surfaceBuffer->GetStride(); + int32_t alignedHeight = alignedHeight_; + DHLOGD("OutputBuffer alignedWidth %d, alignedHeight %d, TimeUs %lld.", alignedWidth, alignedHeight, timeStampUs); + CopyDecodedImage(surfaceBuffer, timeStampUs, alignedWidth, alignedHeight); + surface->ReleaseBuffer(surfaceBuffer, -1); + outputTimeStampUs_ = timeStampUs; + { + std::lock_guard lck(mtxHoldCount_); + if (waitDecoderOutputCount_ <= 0) { + DHLOGE("The waitDecoderOutputCount_ = %d.", waitDecoderOutputCount_); + } + if (outputTimeStampUs_ == 0) { + waitDecoderOutputCount_ -= FIRST_FRAME_INPUT_NUM; + } else { + waitDecoderOutputCount_--; + } + DHLOGD("Wait decoder output frames number is %d.", waitDecoderOutputCount_); + } +} + +void DecodeDataProcess::CopyDecodedImage(const sptr& surBuf, int64_t timeStampUs, int32_t alignedWidth, + int32_t alignedHeight) +{ + if (surBuf == nullptr) { + DHLOGE("surface buffer is null!"); + return; + } + size_t validDecodedImageSize = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight() * 4); + size_t surfaceBufSize = static_cast(surBuf->GetSize()); + if (validDecodedImageSize > surfaceBufSize) { + DHLOGE("Buffer size error, validDecodedImageSize %d, surBufSize %d.", + validDecodedImageSize, surBuf->GetSize()); + return; + } + std::shared_ptr bufferOutput = std::make_shared(validDecodedImageSize); + uint8_t *addr = static_cast(surBuf->GetVirAddr()); + errno_t err = memcpy_s(bufferOutput->Data(), bufferOutput->Size(), addr, validDecodedImageSize); + if (err != EOK) { + DHLOGE("memcpy_s surface buffer failed."); + return; + } + bufferOutput->SetInt64("timeUs", timeStampUs); + bufferOutput->SetInt32("Videoformat", static_cast(sourceConfig_.GetVideoformat())); + bufferOutput->SetInt32("alignedWidth", static_cast(sourceConfig_.GetWidth())); + bufferOutput->SetInt32("alignedHeight", static_cast(sourceConfig_.GetHeight())); + bufferOutput->SetInt32("width", static_cast(sourceConfig_.GetWidth())); + 
bufferOutput->SetInt32("height", static_cast(sourceConfig_.GetHeight())); + PostOutputDataBuffers(bufferOutput); +} + +int32_t DecodeDataProcess::CopyYUVPlaneByRow(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + int32_t ret = CheckCopyImageInfo(srcImgInfo, dstImgInfo); + if (ret != DCAMERA_OK) { + DHLOGE("Check CopyImageUnitInfo failed."); + return ret; + } + errno_t err = EOK; + int32_t srcDataOffset = 0; + int32_t dstDataOffset = 0; + for (int32_t yh = 0; yh < dstImgInfo.height; yh++) { + err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.chromaOffset - dstDataOffset, + srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); + if (err != EOK) { + DHLOGE("memcpy_s YPlane in line[%d] failed.", yh); + return DCAMERA_MEMORY_OPT_ERROR; + } + dstDataOffset += dstImgInfo.alignedWidth; + srcDataOffset += srcImgInfo.alignedWidth; + } + DHLOGD("Copy Yplane end, dstDataOffset %d, srcDataOffset %d, validYPlaneSize %d.", + dstDataOffset, srcDataOffset, dstImgInfo.chromaOffset); + + int32_t y2UvRatio = 2; + dstDataOffset = dstImgInfo.chromaOffset; + srcDataOffset = srcImgInfo.chromaOffset; + for (int32_t uvh = 0; uvh < dstImgInfo.height / y2UvRatio; uvh++) { + err = memcpy_s(dstImgInfo.imgData + dstDataOffset, dstImgInfo.imgSize - dstDataOffset, + srcImgInfo.imgData + srcDataOffset, dstImgInfo.width); + if (err != EOK) { + DHLOGE("memcpy_s UVPlane in line[%d] failed.", uvh); + return DCAMERA_MEMORY_OPT_ERROR; + } + dstDataOffset += dstImgInfo.alignedWidth; + srcDataOffset += srcImgInfo.alignedWidth; + } + DHLOGD("Copy UVplane end, dstDataOffset %d, srcDataOffset %d.", dstDataOffset, srcDataOffset); + return DCAMERA_OK; +} + +int32_t DecodeDataProcess::CheckCopyImageInfo(const ImageUnitInfo& srcImgInfo, const ImageUnitInfo& dstImgInfo) +{ + if (srcImgInfo.imgData == nullptr || dstImgInfo.imgData == nullptr) { + DHLOGE("The imgData of srcImgInfo or the imgData of dstImgInfo are null!"); + return DCAMERA_BAD_VALUE; + } + if (srcImgInfo.colorFormat 
!= dstImgInfo.colorFormat) { + DHLOGE("CopyInfo error : srcImgInfo colorFormat %d, dstImgInfo colorFormat %d.", + srcImgInfo.colorFormat, dstImgInfo.colorFormat); + return DCAMERA_BAD_VALUE; + } + + if (!IsCorrectImageUnitInfo(srcImgInfo)) { + DHLOGE("srcImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + + "imgSize %lld.", srcImgInfo.width, srcImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight, + srcImgInfo.chromaOffset, srcImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + if (!IsCorrectImageUnitInfo(dstImgInfo)) { + DHLOGE("dstImginfo fail: width %d, height %d, alignedWidth %d, alignedHeight %d, chromaOffset %lld, " + + "imgSize %lld.", dstImgInfo.width, dstImgInfo.height, dstImgInfo.alignedWidth, dstImgInfo.alignedHeight, + dstImgInfo.chromaOffset, dstImgInfo.imgSize); + return DCAMERA_BAD_VALUE; + } + + if (dstImgInfo.width > srcImgInfo.alignedWidth || dstImgInfo.height > srcImgInfo.alignedHeight) { + DHLOGE("Comparison ImgInfo fail: dstwidth %d, dstheight %d, srcAlignedWidth %d, srcAlignedHeight %d.", + dstImgInfo.width, dstImgInfo.height, srcImgInfo.alignedWidth, srcImgInfo.alignedHeight); + return DCAMERA_BAD_VALUE; + } + return DCAMERA_OK; +} + +bool DecodeDataProcess::IsCorrectImageUnitInfo(const ImageUnitInfo& imgInfo) +{ + int32_t y2UvRatio = 2; + int32_t bytesPerPixel = 3; + size_t expectedImgSize = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight * + bytesPerPixel / y2UvRatio); + size_t expectedChromaOffset = static_cast(imgInfo.alignedWidth * imgInfo.alignedHeight); + return (imgInfo.width <= imgInfo.alignedWidth && imgInfo.height <= imgInfo.alignedHeight && + imgInfo.imgSize >= expectedImgSize && imgInfo.chromaOffset == expectedChromaOffset); +} + +void DecodeDataProcess::PostOutputDataBuffers(std::shared_ptr& outputBuffer) +{ + if (eventBusDecode_ == nullptr || outputBuffer == nullptr) { + DHLOGE("eventBusDecode_ or outputBuffer is null."); + return; + } + std::vector> 
multiDataBuffers; + multiDataBuffers.push_back(outputBuffer); + std::shared_ptr transNextNodePacket = std::make_shared(VideoCodecType::NO_CODEC, + multiDataBuffers); + DCameraCodecEvent dCamCodecEv(*this, transNextNodePacket, VideoCodecAction::NO_ACTION); + eventBusDecode_->PostEvent(dCamCodecEv, POSTMODE::POST_ASYNC); + DHLOGD("Send video decoder output asynchronous DCameraCodecEvents success."); +} + +int32_t DecodeDataProcess::DecodeDone(std::vector> outputBuffers) +{ + DHLOGD("Decoder Done."); + if (outputBuffers.empty()) { + DHLOGE("The received data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + + if (nextDataProcess_ != nullptr) { + DHLOGD("Send to the next node of the decoder for processing."); + int32_t err = nextDataProcess_->ProcessData(outputBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Someone node after the decoder processes fail."); + } + return err; + } + DHLOGD("The current node is the last node, and Output the processed video buffer"); + std::shared_ptr targetPipelineSource = callbackPipelineSource_.lock(); + if (targetPipelineSource == nullptr) { + DHLOGE("callbackPipelineSource_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + targetPipelineSource->OnProcessedVideoBuffer(outputBuffers[0]); + return DCAMERA_OK; +} + +void DecodeDataProcess::OnEvent(DCameraCodecEvent& ev) +{ + DHLOGD("Receiving asynchronous DCameraCodecEvents."); + std::shared_ptr receivedCodecPacket = ev.GetCodecPacket(); + VideoCodecAction action = ev.GetAction(); + switch (action) { + case VideoCodecAction::NO_ACTION: { + if (receivedCodecPacket == nullptr) { + DHLOGE("the received codecPacket of action [%d] is null.", action); + OnError(); + return; + } + DecodeDone(receivedCodecPacket->GetDataBuffers()); + break; + } + case VideoCodecAction::ACTION_ONCE_AGAIN: + DHLOGD("Try FeedDecoderInputBuffer again."); + FeedDecoderInputBuffer(); + return; + default: + DHLOGD("The action : %d is not supported.", action); + return; + } +} + +void DecodeDataProcess::OnError() +{ + 
DHLOGD("DecodeDataProcess : OnError."); + isDecoderProcess_ = false; + videoDecoder_->Stop(); + std::shared_ptr targetPipelineSource = callbackPipelineSource_.lock(); + if (targetPipelineSource == nullptr) { + DHLOGE("callbackPipelineSource_ is nullptr."); + return; + } + targetPipelineSource->OnError(DataProcessErrorType::ERROR_PIPELINE_DECODER); +} + +void DecodeDataProcess::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("DecodeDataProcess::OnInputBufferAvailable"); + std::lock_guard lck(mtxHoldCount_); + if (availableInputIndexsQueue_.size() > VIDEO_DECODER_QUEUE_MAX) { + DHLOGE("Video decoder available indexs queue overflow."); + return; + } + DHLOGD("Video decoder available indexs queue push index [%d].", index); + availableInputIndexsQueue_.push(index); +} + +void DecodeDataProcess::OnOutputFormatChanged(const Media::Format &format) +{ + if (decodeOutputFormat_.GetFormatMap().empty()) { + DHLOGE("The first changed video decoder output format is null."); + return; + } + decodeOutputFormat_ = format; +} + +void DecodeDataProcess::OnOutputBufferAvailable(uint32_t index, const Media::AVCodecBufferInfo& info, + const Media::AVCodecBufferFlag& flag) +{ + if (!isDecoderProcess_) { + DHLOGE("Decoder node occurred error or start release."); + return; + } + DHLOGD("Video decode buffer info: presentation TimeUs %lld, size %d, offset %d, flag %d", + info.presentationTimeUs, info.size, info.offset, flag); + outputInfo_ = info; + { + std::lock_guard lck(mtxDecoderState_); + if (videoDecoder_ == nullptr) { + DHLOGE("The video decoder does not exist before decoding data."); + return; + } + int32_t errRelease = videoDecoder_->ReleaseOutputBuffer(index, true); + if (errRelease != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("The video decoder output decoded data to surface fail, index : [%d].", index); + } + } +} + +VideoConfigParams DecodeDataProcess::GetSourceConfig() const +{ + return sourceConfig_; +} + +VideoConfigParams DecodeDataProcess::GetTargetConfig() const 
+{ + return targetConfig_; +} + +void DecodeSurfaceListener::OnBufferAvailable() +{ + DHLOGD("DecodeSurfaceListener : OnBufferAvailable."); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->GetDecoderOutputBuffer(surface_); +} + +void DecodeSurfaceListener::SetSurface(const sptr& surface) +{ + surface_ = surface; +} + +void DecodeSurfaceListener::SetDecodeVideoNode(const std::weak_ptr& decodeVideoNode) +{ + decodeVideoNode_ = decodeVideoNode; +} + +DecodeSurfaceListener::~DecodeSurfaceListener() +{ + DHLOGD("DecodeSurfaceListener : ~DecodeSurfaceListener."); + surface_ = nullptr; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_video_callback.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_video_callback.cpp new file mode 100644 index 00000000..04b1bcc6 --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/multimedia_codec/decode_video_callback.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "distributed_hardware_log.h" + +namespace OHOS { +namespace DistributedHardware { +void DecodeVideoCallback::OnError(Media::AVCodecErrorType errorType, int32_t errorCode) +{ + DHLOGE("DecodeVideoCallback : OnError. 
Error type: %d. Error code: %d ", errorType, errorCode); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->OnError(); +} + +void DecodeVideoCallback::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("DecodeVideoCallback : OnInputBufferAvailable."); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->OnInputBufferAvailable(index); +} + +void DecodeVideoCallback::OnOutputFormatChanged(const Media::Format &format) +{ + DHLOGD("DecodeVideoCallback : OnOutputFormatChanged."); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->OnOutputFormatChanged(format); +} + +void DecodeVideoCallback::OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) +{ + DHLOGD("DecodeVideoCallback : OnOutputBufferAvailable. Only relaese buffer when using surface output."); + std::shared_ptr targetDecoderNode = decodeVideoNode_.lock(); + if (targetDecoderNode == nullptr) { + DHLOGE("decodeVideoNode_ is nullptr."); + return; + } + targetDecoderNode->OnOutputBufferAvailable(index, info, flag); +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process.cpp new file mode 100644 index 00000000..834b0390 --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process.cpp @@ -0,0 +1,498 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "encode_data_process.h" + +#include + +#include "display_type.h" +#include "distributed_hardware_log.h" +#include "graphic_common_c.h" + +#include "dcamera_utils_tools.h" +#include "encode_video_callback.h" + +#ifndef DH_LOG_TAG +#define DH_LOG_TAG "DCDP_NODE_ENCODEC" +#endif + +namespace OHOS { +namespace DistributedHardware { +const std::map EncodeDataProcess::ENCODER_BITRATE_TABLE = { + std::map::value_type(WIDTH_320_HEIGHT_240, BITRATE_500000), + std::map::value_type(WIDTH_480_HEIGHT_360, BITRATE_1110000), + std::map::value_type(WIDTH_640_HEIGHT_360, BITRATE_1500000), + std::map::value_type(WIDTH_640_HEIGHT_480, BITRATE_1800000), + std::map::value_type(WIDTH_720_HEIGHT_540, BITRATE_2100000), + std::map::value_type(WIDTH_960_HEIGHT_540, BITRATE_2300000), + std::map::value_type(WIDTH_960_HEIGHT_720, BITRATE_2800000), + std::map::value_type(WIDTH_1280_HEIGHT_720, BITRATE_3400000), + std::map::value_type(WIDTH_1440_HEIGHT_1080, BITRATE_5000000), + std::map::value_type(WIDTH_1920_HEIGHT_1080, BITRATE_6000000), +}; + +EncodeDataProcess::~EncodeDataProcess() +{ + if (isEncoderProcess_) { + DHLOGD("~EncodeDataProcess : ReleaseProcessNode."); + ReleaseProcessNode(); + } +} + +int32_t EncodeDataProcess::InitNode() +{ + DHLOGD("Init DCamera EncodeNode start."); + if (!(IsInEncoderRange(sourceConfig_) && IsInEncoderRange(targetConfig_))) { + DHLOGE("Source config or target config are invalid."); + return 
DCAMERA_BAD_VALUE; + } + if (!IsConvertible(sourceConfig_, targetConfig_)) { + DHLOGE("The EncodeNode cannot convert source VideoCodecType %d to target VideoCodecType %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_BAD_TYPE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("Disable EncodeNode. The target VideoCodecType %d is the same as the source VideoCodecType %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_OK; + } + + int32_t err = InitEncoder(); + if (err != DCAMERA_OK) { + DHLOGE("Init video encoder failed."); + ReleaseProcessNode(); + return err; + } + isEncoderProcess_ = true; + return DCAMERA_OK; +} + +bool EncodeDataProcess::IsInEncoderRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH || curConfig.GetWidth() <= MAX_VIDEO_WIDTH || + curConfig.GetHeight() >= MIN_VIDEO_HEIGHT || curConfig.GetHeight() <= MAX_VIDEO_HEIGHT || + curConfig.GetFrameRate() <= MAX_FRAME_RATE); +} + +bool EncodeDataProcess::IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + return (sourceConfig.GetVideoCodecType() == targetConfig.GetVideoCodecType() || + sourceConfig.GetVideoCodecType() == VideoCodecType::NO_CODEC); +} + +int32_t EncodeDataProcess::InitEncoder() +{ + DHLOGD("Init video encoder."); + int32_t err = InitEncoderMetadataFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Init video encoder metadata format failed."); + return err; + } + err = InitEncoderBitrateFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Init video encoder bitrate format failed."); + return err; + } + + videoEncoder_ = Media::VideoEncoderFactory::CreateByMime(processType_); + if (videoEncoder_ == nullptr) { + DHLOGE("Create video encoder failed."); + return DCAMERA_INIT_ERR; + } + encodeVideoCallback_ = std::make_shared(shared_from_this()); + int32_t retVal = 
videoEncoder_->SetCallback(encodeVideoCallback_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video encoder callback failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Configure(metadataFormat_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video encoder metadata format failed."); + return DCAMERA_INIT_ERR; + } + encodeProducerSurface_ = videoEncoder_->CreateInputSurface(); + if (encodeProducerSurface_ == nullptr) { + DHLOGE("Get video encoder producer surface failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Prepare(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video encoder prepare failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Start(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video encoder start failed."); + return DCAMERA_INIT_ERR; + } + return DCAMERA_OK; +} + +int32_t EncodeDataProcess::InitEncoderMetadataFormat() +{ + DHLOGD("Init video encoder metadata format."); + switch (targetConfig_.GetVideoCodecType()) { + case VideoCodecType::CODEC_H264: + processType_ = "video/avc"; + metadataFormat_.PutStringValue("codec_mime", processType_); + metadataFormat_.PutIntValue("codec_profile", Media::AVCProfile::AVC_PROFILE_BASELINE); + break; + case VideoCodecType::CODEC_H265: + processType_ = "video/hevc"; + metadataFormat_.PutStringValue("codec_mime", processType_); + metadataFormat_.PutIntValue("codec_profile", Media::HEVCProfile::HEVC_PROFILE_MAIN); + break; + default: + DHLOGE("The current codec type does not support encoding."); + return DCAMERA_NOT_FOUND; + } + switch (sourceConfig_.GetVideoformat()) { + case Videoformat::YUVI420: + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::YUVI420); + break; + case Videoformat::NV12: + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::NV12); + break; + case Videoformat::NV21: + metadataFormat_.PutIntValue("pixel_format", 
Media::VideoPixelFormat::NV21); + break; + default: + DHLOGE("The current pixel format does not support encoding."); + return DCAMERA_NOT_FOUND; + } + + metadataFormat_.PutLongValue("max_input_size", NORM_YUV420_BUFFER_SIZE); + metadataFormat_.PutIntValue("width", (int32_t)sourceConfig_.GetWidth()); + metadataFormat_.PutIntValue("height", (int32_t)sourceConfig_.GetHeight()); + metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); + return DCAMERA_OK; +} + +int32_t EncodeDataProcess::InitEncoderBitrateFormat() +{ + DHLOGD("Init video encoder bitrate format."); + if (!(IsInEncoderRange(sourceConfig_) && IsInEncoderRange(targetConfig_))) { + DHLOGE("Source config or target config are invalid."); + return DCAMERA_BAD_VALUE; + } + metadataFormat_.PutIntValue("i_frame_interval", IDR_FRAME_INTERVAL_MS); + metadataFormat_.PutIntValue("video_encode_bitrate_mode", Media::VideoEncodeBitrateMode::VBR); + + if (ENCODER_BITRATE_TABLE.empty()) { + DHLOGD("ENCODER_BITRATE_TABLE is null, use the default bitrate of the encoder."); + return DCAMERA_OK; + } + int64_t pixelformat = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight()); + int32_t matchedBitrate = BITRATE_6000000; + int64_t minPixelformatDiff = WIDTH_1920_HEIGHT_1080 - pixelformat; + for (auto it = ENCODER_BITRATE_TABLE.begin(); it != ENCODER_BITRATE_TABLE.end(); it++) { + int64_t pixelformatDiff = abs(pixelformat - it->first); + if (pixelformatDiff == 0) { + matchedBitrate = it->second; + break; + } + if (minPixelformatDiff >= pixelformatDiff) { + minPixelformatDiff = pixelformatDiff; + matchedBitrate = it->second; + } + } + DHLOGD("Source config: width : %d, height : %d, matched bitrate %d.", sourceConfig_.GetWidth(), + sourceConfig_.GetHeight(), matchedBitrate); + metadataFormat_.PutIntValue("bitrate", matchedBitrate); + return DCAMERA_OK; +} + +void EncodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : EncodeNode.", nodeRank_); + isEncoderProcess_ = false; + if 
(nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); + } + + { + std::lock_guard lck(mtxEncoderState_); + if (videoEncoder_ != nullptr) { + DHLOGD("Start release videoEncoder."); + videoEncoder_->Flush(); + videoEncoder_->Stop(); + videoEncoder_->Release(); + encodeProducerSurface_ = nullptr; + videoEncoder_ = nullptr; + encodeVideoCallback_ = nullptr; + } + } + + waitEncoderOutputCount_ = 0; + lastFeedEncoderInputBufferTimeUs_ = 0; + inputTimeStampUs_ = 0; + processType_ = ""; + DHLOGD("Release [%d] node : EncodeNode end.", nodeRank_); +} + +int32_t EncodeDataProcess::ProcessData(std::vector>& inputBuffers) +{ + DHLOGD("Process data in EncodeDataProcess."); + if (inputBuffers.empty()) { + DHLOGE("The input data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("The target VideoCodecType : %d is the same as the source VideoCodecType : %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return EncodeDone(inputBuffers); + } + + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before encoding data."); + return DCAMERA_INIT_ERR; + } + if (inputBuffers[0]->Size() > NORM_YUV420_BUFFER_SIZE) { + DHLOGE("EncodeNode input buffer size %d error.", inputBuffers[0]->Size()); + return DCAMERA_MEMORY_OPT_ERROR; + } + if (!isEncoderProcess_) { + DHLOGE("EncodeNode occurred error or start release."); + return DCAMERA_DISABLE_PROCESS; + } + int32_t err = FeedEncoderInputBuffer(inputBuffers[0]); + if (err != DCAMERA_OK) { + DHLOGE("Feed encoder input Buffer failed."); + return err; + } + { + std::lock_guard lck(mtxHoldCount_); + if (inputTimeStampUs_ == 0) { + waitEncoderOutputCount_ += FIRST_FRAME_OUTPUT_NUM; + } else { + waitEncoderOutputCount_++; + } + DHLOGD("Wait encoder output frames number is %d.", waitEncoderOutputCount_); + } + return DCAMERA_OK; +} + +int32_t 
EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& inputBuffer) +{ + std::lock_guard lck(mtxEncoderState_); + DHLOGD("Feed encoder input buffer, buffer size %d.", inputBuffer->Size()); + if (encodeProducerSurface_ == nullptr) { + DHLOGE("Get encoder input producer surface failed."); + return DCAMERA_INIT_ERR; + } + sptr surfacebuffer = GetEncoderInputSurfaceBuffer(); + if (surfacebuffer == nullptr) { + DHLOGE("Get encoder input producer surface buffer failed."); + return DCAMERA_BAD_OPERATE; + } + uint8_t *addr = static_cast(surfacebuffer->GetVirAddr()); + if (addr == nullptr) { + DHLOGE("SurfaceBuffer address is nullptr"); + encodeProducerSurface_->CancelBuffer(surfacebuffer); + return DCAMERA_BAD_OPERATE; + } + size_t size = static_cast(surfacebuffer->GetSize()); + errno_t err = memcpy_s(addr, size, inputBuffer->Data(), inputBuffer->Size()); + if (err != EOK) { + DHLOGE("memcpy_s encoder input producer surface buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + inputTimeStampUs_ = GetEncoderTimeStamp(); + DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); + surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); + BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; + SurfaceError ret = encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Flush encoder input producer surface buffer failed."); + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +sptr EncodeDataProcess::GetEncoderInputSurfaceBuffer() +{ + BufferRequestConfig requestConfig; + requestConfig.width = static_cast(sourceConfig_.GetWidth()); + requestConfig.height = static_cast(sourceConfig_.GetHeight()); + requestConfig.usage = HBM_USE_CPU_READ | HBM_USE_CPU_WRITE | HBM_USE_MEM_DMA; + requestConfig.timeout = 0; + requestConfig.strideAlignment = ENCODER_STRIDE_ALIGNMENT; + switch 
(sourceConfig_.GetVideoformat()) { + case Videoformat::YUVI420: + requestConfig.format = PixelFormat::PIXEL_FMT_YCBCR_420_P; + break; + case Videoformat::NV12: + requestConfig.format = PixelFormat::PIXEL_FMT_YCBCR_420_SP; + break; + case Videoformat::NV21: + requestConfig.format = PixelFormat::PIXEL_FMT_YCRCB_420_SP; + break; + default: + DHLOGE("The current pixel format does not support encoding."); + return nullptr; + } + sptr surfacebuffer = nullptr; + int32_t flushFence = -1; + GSError err = encodeProducerSurface_->RequestBuffer(surfacebuffer, flushFence, requestConfig); + if (err != GSERROR_OK || surfacebuffer == nullptr) { + DHLOGE("Request encoder input producer surface buffer failed, error code: %d.", err); + } + return surfacebuffer; +} + +int64_t EncodeDataProcess::GetEncoderTimeStamp() +{ + int64_t TimeDifferenceStampUs = 0; + const int64_t nsPerUs = 1000L; + int64_t nowTimeUs = GetNowTimeStampUs() * nsPerUs; + if (lastFeedEncoderInputBufferTimeUs_ == 0) { + lastFeedEncoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; + } + TimeDifferenceStampUs = nowTimeUs - lastFeedEncoderInputBufferTimeUs_; + lastFeedEncoderInputBufferTimeUs_ = nowTimeUs; + return TimeDifferenceStampUs; +} + +int32_t EncodeDataProcess::GetEncoderOutputBuffer(uint32_t index, Media::AVCodecBufferInfo info) +{ + DHLOGD("Get encoder output buffer."); + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before output encoded data."); + return DCAMERA_BAD_VALUE; + } + std::shared_ptr sharedMemoryOutput = videoEncoder_->GetOutputBuffer(index); + if (sharedMemoryOutput == nullptr) { + DHLOGE("Failed to get the output shared memory, index : %u", index); + return DCAMERA_BAD_OPERATE; + } + + if (info.size <= 0) { + DHLOGE("AVCodecBufferInfo error, buffer size : %d", info.size); + return DCAMERA_BAD_VALUE; + } + + size_t outputMemoDataSize = static_cast(info.size); + DHLOGD("Encoder output buffer size : %d", outputMemoDataSize); + std::shared_ptr 
bufferOutput = std::make_shared(outputMemoDataSize); + errno_t err = memcpy_s(bufferOutput->Data(), bufferOutput->Size(), + sharedMemoryOutput->GetBase(), outputMemoDataSize); + if (err != EOK) { + DHLOGE("memcpy_s buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + bufferOutput->SetInt64("timeUs", info.presentationTimeUs); + + std::vector> nextInputBuffers; + nextInputBuffers.push_back(bufferOutput); + return EncodeDone(nextInputBuffers); +} + +int32_t EncodeDataProcess::EncodeDone(std::vector> outputBuffers) +{ + DHLOGD("Encoder done."); + if (outputBuffers.empty()) { + DHLOGE("The received data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + + if (nextDataProcess_ != nullptr) { + DHLOGD("Send to the next node of the encoder for processing."); + int32_t err = nextDataProcess_->ProcessData(outputBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Someone node after the encoder processes failed."); + } + return err; + } + DHLOGD("The current node is the last node, and Output the processed video buffer"); + std::shared_ptr targetPipelineSink = callbackPipelineSink_.lock(); + if (targetPipelineSink == nullptr) { + DHLOGE("callbackPipelineSink_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + targetPipelineSink->OnProcessedVideoBuffer(outputBuffers[0]); + return DCAMERA_OK; +} + +void EncodeDataProcess::OnError() +{ + DHLOGD("EncodeDataProcess : OnError."); + isEncoderProcess_ = false; + videoEncoder_->Flush(); + videoEncoder_->Stop(); + std::shared_ptr targetPipelineSink = callbackPipelineSink_.lock(); + if (targetPipelineSink == nullptr) { + DHLOGE("callbackPipelineSink_ is nullptr."); + return; + } + targetPipelineSink->OnError(DataProcessErrorType::ERROR_PIPELINE_ENCODER); +} + +void EncodeDataProcess::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("The available input buffer index : %u. 
No operation when using surface input.", index); +} + +void EncodeDataProcess::OnOutputFormatChanged(const Media::Format &format) +{ + if (encodeOutputFormat_.GetFormatMap().empty()) { + DHLOGE("The first changed video encoder output format is null."); + return; + } + encodeOutputFormat_ = format; +} + +void EncodeDataProcess::OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) +{ + if (!isEncoderProcess_) { + DHLOGE("EncodeNode occurred error or start release."); + return; + } + DHLOGD("Video encode buffer info: presentation TimeUs %lld, size %d, offset %d, flag %d", + info.presentationTimeUs, info.size, info.offset, flag); + int32_t err = GetEncoderOutputBuffer(index, info); + if (err != DCAMERA_OK) { + DHLOGE("Get encode output Buffer failed."); + return; + } + { + std::lock_guard lck(mtxHoldCount_); + if (waitEncoderOutputCount_ <= 0) { + DHLOGE("The waitEncoderOutputCount_ = %d.", waitEncoderOutputCount_); + } + waitEncoderOutputCount_--; + DHLOGD("Wait encoder output frames number is %d.", waitEncoderOutputCount_); + } + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before release output buffer index."); + return; + } + int32_t errRelease = videoEncoder_->ReleaseOutputBuffer(index); + if (errRelease != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("The video encoder release output buffer failed, index : [%u].", index); + } +} +VideoConfigParams EncodeDataProcess::GetSourceConfig() const +{ + return sourceConfig_; +} + +VideoConfigParams EncodeDataProcess::GetTargetConfig() const +{ + return targetConfig_; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp new file mode 100644 index 00000000..000e5c8b --- /dev/null +++ 
b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_data_process_common.cpp @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "encode_data_process.h" + +#include + +#include "display_type.h" +#include "distributed_hardware_log.h" +#include "graphic_common_c.h" + +#include "dcamera_utils_tools.h" +#include "encode_video_callback.h" + +#ifndef DH_LOG_TAG +#define DH_LOG_TAG "DCDP_NODE_ENCODEC" +#endif + +namespace OHOS { +namespace DistributedHardware { +const std::map EncodeDataProcess::ENCODER_BITRATE_TABLE = { + std::map::value_type(WIDTH_320_HEIGHT_240, BITRATE_500000), + std::map::value_type(WIDTH_480_HEIGHT_360, BITRATE_1110000), + std::map::value_type(WIDTH_640_HEIGHT_360, BITRATE_1500000), + std::map::value_type(WIDTH_640_HEIGHT_480, BITRATE_1800000), + std::map::value_type(WIDTH_720_HEIGHT_540, BITRATE_2100000), + std::map::value_type(WIDTH_960_HEIGHT_540, BITRATE_2300000), + std::map::value_type(WIDTH_960_HEIGHT_720, BITRATE_2800000), + std::map::value_type(WIDTH_1280_HEIGHT_720, BITRATE_3400000), + std::map::value_type(WIDTH_1440_HEIGHT_1080, BITRATE_5000000), + std::map::value_type(WIDTH_1920_HEIGHT_1080, BITRATE_6000000), +}; + +EncodeDataProcess::~EncodeDataProcess() +{ + if (isEncoderProcess_) { + DHLOGD("~EncodeDataProcess : ReleaseProcessNode."); + ReleaseProcessNode(); + } +} + +int32_t EncodeDataProcess::InitNode() +{ + DHLOGD("Common Init 
DCamera EncodeNode start."); + if (!(IsInEncoderRange(sourceConfig_) && IsInEncoderRange(targetConfig_))) { + DHLOGE("Common Source config or target config are invalid."); + return DCAMERA_BAD_VALUE; + } + if (!IsConvertible(sourceConfig_, targetConfig_)) { + DHLOGE("Common The EncodeNode cannot convert source VideoCodecType %d to target VideoCodecType %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_BAD_TYPE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("Common Disable EncodeNode. The target VideoCodecType %d is the same as the source VideoCodecType %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return DCAMERA_OK; + } + + int32_t err = InitEncoder(); + if (err != DCAMERA_OK) { + DHLOGE("Common Init video encoder fail."); + ReleaseProcessNode(); + return err; + } + isEncoderProcess_ = true; + return DCAMERA_OK; +} + +bool EncodeDataProcess::IsInEncoderRange(const VideoConfigParams& curConfig) +{ + return (curConfig.GetWidth() >= MIN_VIDEO_WIDTH || curConfig.GetWidth() <= MAX_VIDEO_WIDTH || + curConfig.GetHeight() >= MIN_VIDEO_HEIGHT || curConfig.GetHeight() <= MAX_VIDEO_HEIGHT || + curConfig.GetFrameRate() <= MAX_FRAME_RATE); +} + +bool EncodeDataProcess::IsConvertible(const VideoConfigParams& sourceConfig, const VideoConfigParams& targetConfig) +{ + return (sourceConfig.GetVideoCodecType() == targetConfig.GetVideoCodecType() || + sourceConfig.GetVideoCodecType() == VideoCodecType::NO_CODEC); +} + +int32_t EncodeDataProcess::InitEncoder() +{ + DHLOGD("Common Init video encoder."); + int32_t err = InitEncoderMetadataFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Common Init video encoder metadata format fail."); + return err; + } + err = InitEncoderBitrateFormat(); + if (err != DCAMERA_OK) { + DHLOGE("Common Init video encoder bitrate format fail."); + return err; + } + + videoEncoder_ = 
Media::VideoEncoderFactory::CreateByMime(processType_); + if (videoEncoder_ == nullptr) { + DHLOGE("Create video encoder failed."); + return DCAMERA_INIT_ERR; + } + encodeVideoCallback_ = std::make_shared(shared_from_this()); + int32_t retVal = videoEncoder_->SetCallback(encodeVideoCallback_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video encoder callback failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Configure(metadataFormat_); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Set video encoder metadata format failed."); + return DCAMERA_INIT_ERR; + } + encodeProducerSurface_ = videoEncoder_->CreateInputSurface(); + if (encodeProducerSurface_ == nullptr) { + DHLOGE("Get video encoder producer surface failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Prepare(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video encoder prepare failed."); + return DCAMERA_INIT_ERR; + } + retVal = videoEncoder_->Start(); + if (retVal != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("Video encoder start failed."); + return DCAMERA_INIT_ERR; + } + DHLOGI("Common Init video encoder success"); + return DCAMERA_OK; +} + +int32_t EncodeDataProcess::InitEncoderMetadataFormat() +{ + DHLOGD("Common Init video encoder metadata format."); + processType_ = "video/mp4v-es"; + metadataFormat_.PutStringValue("codec_mime", processType_); + metadataFormat_.PutIntValue("codec_profile", Media::MPEG4Profile::MPEG4_PROFILE_ADVANCED_CODING); + + int32_t width = (int32_t)sourceConfig_.GetWidth(); + int32_t height = (int32_t)sourceConfig_.GetHeight(); + metadataFormat_.PutIntValue("pixel_format", Media::VideoPixelFormat::RGBA); + metadataFormat_.PutLongValue("max_input_size", width * height * 4); + metadataFormat_.PutIntValue("width", width); + metadataFormat_.PutIntValue("height", height); + metadataFormat_.PutIntValue("frame_rate", MAX_FRAME_RATE); + return DCAMERA_OK; +} + +int32_t 
EncodeDataProcess::InitEncoderBitrateFormat() +{ + DHLOGD("Init video encoder bitrate format."); + if (!(IsInEncoderRange(sourceConfig_) && IsInEncoderRange(targetConfig_))) { + DHLOGE("Source config or target config are invalid."); + return DCAMERA_BAD_VALUE; + } + metadataFormat_.PutIntValue("i_frame_interval", IDR_FRAME_INTERVAL_MS); + metadataFormat_.PutIntValue("video_encode_bitrate_mode", Media::VideoEncodeBitrateMode::VBR); + + if (ENCODER_BITRATE_TABLE.empty()) { + DHLOGD("ENCODER_BITRATE_TABLE is null, use the default bitrate of the encoder."); + return DCAMERA_OK; + } + int64_t pixelformat = static_cast(sourceConfig_.GetWidth() * sourceConfig_.GetHeight()); + int32_t matchedBitrate = BITRATE_6000000; + int64_t minPixelformatDiff = WIDTH_1920_HEIGHT_1080 - pixelformat; + for (auto it = ENCODER_BITRATE_TABLE.begin(); it != ENCODER_BITRATE_TABLE.end(); it++) { + int64_t pixelformatDiff = abs(pixelformat - it->first); + if (pixelformatDiff == 0) { + matchedBitrate = it->second; + break; + } + if (minPixelformatDiff >= pixelformatDiff) { + minPixelformatDiff = pixelformatDiff; + matchedBitrate = it->second; + } + } + DHLOGD("Source config: width : %d, height : %d, matched bitrate %d.", sourceConfig_.GetWidth(), + sourceConfig_.GetHeight(), matchedBitrate); + metadataFormat_.PutIntValue("bitrate", matchedBitrate); + return DCAMERA_OK; +} + +void EncodeDataProcess::ReleaseProcessNode() +{ + DHLOGD("Start release [%d] node : EncodeNode.", nodeRank_); + isEncoderProcess_ = false; + if (nextDataProcess_ != nullptr) { + nextDataProcess_->ReleaseProcessNode(); + } + + { + std::lock_guard lck(mtxEncoderState_); + if (videoEncoder_ != nullptr) { + DHLOGD("Start release videoEncoder."); + videoEncoder_->Flush(); + videoEncoder_->Stop(); + videoEncoder_->Release(); + encodeProducerSurface_ = nullptr; + videoEncoder_ = nullptr; + encodeVideoCallback_ = nullptr; + } + } + + waitEncoderOutputCount_ = 0; + lastFeedEncoderInputBufferTimeUs_ = 0; + inputTimeStampUs_ = 0; + 
processType_ = ""; + DHLOGD("Release [%d] node : EncodeNode end.", nodeRank_); +} + +int32_t EncodeDataProcess::ProcessData(std::vector>& inputBuffers) +{ + DHLOGD("Process data in EncodeDataProcess."); + if (inputBuffers.empty()) { + DHLOGE("The input data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + if (sourceConfig_.GetVideoCodecType() == targetConfig_.GetVideoCodecType()) { + DHLOGD("The target VideoCodecType : %d is the same as the source VideoCodecType : %d.", + sourceConfig_.GetVideoCodecType(), targetConfig_.GetVideoCodecType()); + return EncodeDone(inputBuffers); + } + + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before encoding data."); + return DCAMERA_INIT_ERR; + } + size_t bufferSize = 1920 * 1808 * 4; + if (inputBuffers[0]->Size() > bufferSize) { + DHLOGE("EncodeNode input buffer size %d error.", inputBuffers[0]->Size()); + return DCAMERA_MEMORY_OPT_ERROR; + } + if (!isEncoderProcess_) { + DHLOGE("EncodeNode occurred error or start release."); + return DCAMERA_DISABLE_PROCESS; + } + int32_t err = FeedEncoderInputBuffer(inputBuffers[0]); + if (err != DCAMERA_OK) { + DHLOGE("Feed encoder input Buffer fail."); + return err; + } + { + std::lock_guard lck(mtxHoldCount_); + if (inputTimeStampUs_ == 0) { + waitEncoderOutputCount_ += FIRST_FRAME_OUTPUT_NUM; + } else { + waitEncoderOutputCount_++; + } + DHLOGD("Wait encoder output frames number is %d.", waitEncoderOutputCount_); + } + return DCAMERA_OK; +} + +int32_t EncodeDataProcess::FeedEncoderInputBuffer(std::shared_ptr& inputBuffer) +{ + std::lock_guard lck(mtxEncoderState_); + DHLOGD("Feed encoder input buffer, buffer size %d.", inputBuffer->Size()); + if (encodeProducerSurface_ == nullptr) { + DHLOGE("Get encoder input producer surface failed."); + return DCAMERA_INIT_ERR; + } + sptr surfacebuffer = GetEncoderInputSurfaceBuffer(); + if (surfacebuffer == nullptr) { + DHLOGE("Get encoder input producer surface buffer failed."); + return DCAMERA_BAD_OPERATE; + } + 
uint8_t *addr = static_cast(surfacebuffer->GetVirAddr()); + if (addr == nullptr) { + DHLOGE("SurfaceBuffer address is nullptr"); + encodeProducerSurface_->CancelBuffer(surfacebuffer); + return DCAMERA_BAD_OPERATE; + } + size_t size = static_cast(surfacebuffer->GetSize()); + errno_t err = memcpy_s(addr, size, inputBuffer->Data(), inputBuffer->Size()); + DHLOGI("FeedEncoderInputBuffer size: %d, bufferSize: %d, err: %d", size, inputBuffer->Size(), err); + if (err != EOK) { + DHLOGE("memcpy_s encoder input producer surface buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + inputTimeStampUs_ = GetEncoderTimeStamp(); + DHLOGD("Encoder input buffer size %d, timeStamp %lld.", inputBuffer->Size(), (long long)inputTimeStampUs_); + + surfacebuffer->GetExtraData()->ExtraSet("timeStamp", inputTimeStampUs_); + BufferFlushConfig flushConfig = { {0, 0, sourceConfig_.GetWidth(), sourceConfig_.GetHeight()}, 0}; + SurfaceError ret = encodeProducerSurface_->FlushBuffer(surfacebuffer, -1, flushConfig); + if (ret != SURFACE_ERROR_OK) { + DHLOGE("Flush encoder input producer surface buffer failed."); + return DCAMERA_BAD_OPERATE; + } + return DCAMERA_OK; +} + +sptr EncodeDataProcess::GetEncoderInputSurfaceBuffer() +{ + BufferRequestConfig requestConfig; + requestConfig.width = static_cast(sourceConfig_.GetWidth()); + requestConfig.height = static_cast(sourceConfig_.GetHeight()); + requestConfig.usage = HBM_USE_CPU_READ | HBM_USE_CPU_WRITE | HBM_USE_MEM_DMA; + requestConfig.timeout = 0; + requestConfig.strideAlignment = ENCODER_STRIDE_ALIGNMENT; + requestConfig.format = PixelFormat::PIXEL_FMT_RGBA_8888; + sptr surfacebuffer = nullptr; + int32_t flushFence = -1; + GSError err = encodeProducerSurface_->RequestBuffer(surfacebuffer, flushFence, requestConfig); + if (err != GSERROR_OK || surfacebuffer == nullptr) { + DHLOGE("Request encoder input producer surface buffer failed, error code: %d.", err); + } + return surfacebuffer; +} + +int64_t EncodeDataProcess::GetEncoderTimeStamp() +{ 
+ const int64_t nsPerUs = 1000L; + int64_t nowTimeUs = GetNowTimeStampUs() * nsPerUs; + return nowTimeUs; +} + +int32_t EncodeDataProcess::GetEncoderOutputBuffer(uint32_t index, Media::AVCodecBufferInfo info) +{ + DHLOGD("Get encoder output buffer."); + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before output encoded data."); + return DCAMERA_BAD_VALUE; + } + std::shared_ptr sharedMemoryOutput = videoEncoder_->GetOutputBuffer(index); + if (sharedMemoryOutput == nullptr) { + DHLOGE("Failed to get the output shared memory, index : %d", index); + return DCAMERA_BAD_OPERATE; + } + + if (info.size <= 0) { + DHLOGE("AVCodecBufferInfo error, buffer size : %d", info.size); + return DCAMERA_BAD_VALUE; + } + + size_t outputMemoDataSize = static_cast(info.size); + DHLOGD("Encoder output buffer size : %d", outputMemoDataSize); + std::shared_ptr bufferOutput = std::make_shared(outputMemoDataSize); + errno_t err = memcpy_s(bufferOutput->Data(), bufferOutput->Size(), + sharedMemoryOutput->GetBase(), outputMemoDataSize); + if (err != EOK) { + DHLOGE("memcpy_s buffer failed."); + return DCAMERA_MEMORY_OPT_ERROR; + } + bufferOutput->SetInt64("timeUs", info.presentationTimeUs); + + std::vector> nextInputBuffers; + nextInputBuffers.push_back(bufferOutput); + return EncodeDone(nextInputBuffers); +} + +int32_t EncodeDataProcess::EncodeDone(std::vector> outputBuffers) +{ + DHLOGD("Encoder done."); + if (outputBuffers.empty()) { + DHLOGE("The received data buffers is empty."); + return DCAMERA_BAD_VALUE; + } + + if (nextDataProcess_ != nullptr) { + DHLOGD("Send to the next node of the encoder for processing."); + int32_t err = nextDataProcess_->ProcessData(outputBuffers); + if (err != DCAMERA_OK) { + DHLOGE("Someone node after the encoder processes fail."); + } + return err; + } + DHLOGD("The current node is the last node, and Output the processed video buffer"); + std::shared_ptr targetPipelineSink = callbackPipelineSink_.lock(); + if (targetPipelineSink 
== nullptr) { + DHLOGE("callbackPipelineSink_ is nullptr."); + return DCAMERA_BAD_VALUE; + } + targetPipelineSink->OnProcessedVideoBuffer(outputBuffers[0]); + return DCAMERA_OK; +} + +void EncodeDataProcess::OnError() +{ + DHLOGD("EncodeDataProcess : OnError."); + isEncoderProcess_ = false; + videoEncoder_->Flush(); + videoEncoder_->Stop(); + std::shared_ptr targetPipelineSink = callbackPipelineSink_.lock(); + if (targetPipelineSink == nullptr) { + DHLOGE("callbackPipelineSink_ is nullptr."); + return; + } + targetPipelineSink->OnError(DataProcessErrorType::ERROR_PIPELINE_ENCODER); +} + +void EncodeDataProcess::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("The available input buffer index : %d. No operation when using surface input.", index); +} + +void EncodeDataProcess::OnOutputFormatChanged(const Media::Format &format) +{ + if (encodeOutputFormat_.GetFormatMap().empty()) { + DHLOGE("The first changed video encoder output format is null."); + return; + } + encodeOutputFormat_ = format; +} + +void EncodeDataProcess::OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) +{ + if (!isEncoderProcess_) { + DHLOGE("EncodeNode occurred error or start release."); + return; + } + DHLOGD("Video encode buffer info: presentation TimeUs %lld, size %d, offset %d, flag %d", + info.presentationTimeUs, info.size, info.offset, flag); + int32_t err = GetEncoderOutputBuffer(index, info); + if (err != DCAMERA_OK) { + DHLOGE("Get encode output Buffer fail."); + return; + } + { + std::lock_guard lck(mtxHoldCount_); + if (waitEncoderOutputCount_ <= 0) { + DHLOGE("The waitEncoderOutputCount_ = %d.", waitEncoderOutputCount_); + } + waitEncoderOutputCount_--; + DHLOGD("Wait encoder output frames number is %d.", waitEncoderOutputCount_); + } + if (videoEncoder_ == nullptr) { + DHLOGE("The video encoder does not exist before release output buffer index."); + return; + } + int32_t errRelease = videoEncoder_->ReleaseOutputBuffer(index); + 
if (errRelease != Media::MediaServiceErrCode::MSERR_OK) { + DHLOGE("The video encoder release output buffer fail, index : [%d].", index); + } +} +VideoConfigParams EncodeDataProcess::GetSourceConfig() const +{ + return sourceConfig_; +} + +VideoConfigParams EncodeDataProcess::GetTargetConfig() const +{ + return targetConfig_; +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_video_callback.cpp b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_video_callback.cpp new file mode 100644 index 00000000..ed321418 --- /dev/null +++ b/services/data_process_yuan/src/pipeline_node/multimedia_codec/encode_video_callback.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#include "distributed_hardware_log.h" + +namespace OHOS { +namespace DistributedHardware { +void EncodeVideoCallback::OnError(Media::AVCodecErrorType errorType, int32_t errorCode) +{ + DHLOGD("EncodeVideoCallback : OnError. Error type: %d. Error code: %d ", errorType, errorCode); + std::shared_ptr targetEncoderNode = encodeVideoNode_.lock(); + if (targetEncoderNode == nullptr) { + DHLOGE("encodeVideoNode_ is nullptr."); + return; + } + targetEncoderNode->OnError(); +} + +void EncodeVideoCallback::OnInputBufferAvailable(uint32_t index) +{ + DHLOGD("EncodeVideoCallback : OnInputBufferAvailable. 
No operation when using surface input."); + std::shared_ptr targetEncoderNode = encodeVideoNode_.lock(); + if (targetEncoderNode == nullptr) { + DHLOGE("encodeVideoNode_ is nullptr."); + return; + } + targetEncoderNode->OnInputBufferAvailable(index); +} + +void EncodeVideoCallback::OnOutputFormatChanged(const Media::Format &format) +{ + DHLOGD("EncodeVideoCallback : OnOutputFormatChanged."); + std::shared_ptr targetEncoderNode = encodeVideoNode_.lock(); + if (targetEncoderNode == nullptr) { + DHLOGE("encodeVideoNode_ is nullptr."); + return; + } + targetEncoderNode->OnOutputFormatChanged(format); +} + +void EncodeVideoCallback::OnOutputBufferAvailable(uint32_t index, Media::AVCodecBufferInfo info, + Media::AVCodecBufferFlag flag) +{ + DHLOGD("EncodeVideoCallback : OnOutputBufferAvailable."); + std::shared_ptr targetEncoderNode = encodeVideoNode_.lock(); + if (targetEncoderNode == nullptr) { + DHLOGE("encodeVideoNode_ is nullptr."); + return; + } + targetEncoderNode->OnOutputBufferAvailable(index, info, flag); +} +} // namespace DistributedHardware +} // namespace OHOS diff --git a/services/data_process_yuan/src/utils/image_common_type.cpp b/services/data_process_yuan/src/utils/image_common_type.cpp new file mode 100644 index 00000000..5990cb40 --- /dev/null +++ b/services/data_process_yuan/src/utils/image_common_type.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "image_common_type.h" + +namespace OHOS { +namespace DistributedHardware { +void VideoConfigParams::SetVideoCodecType(VideoCodecType videoCodec) +{ + videoCodec_ = videoCodec; +} + +void VideoConfigParams::SetVideoformat(Videoformat pixelFormat) +{ + pixelFormat_ = pixelFormat; +} + +void VideoConfigParams::SetFrameRate(uint32_t frameRate) +{ + frameRate_ = frameRate; +} + +void VideoConfigParams::SetWidthAndHeight(uint32_t width, uint32_t height) +{ + width_ = width; + height_ = height; +} + +VideoCodecType VideoConfigParams::GetVideoCodecType() const +{ + return videoCodec_; +} + +Videoformat VideoConfigParams::GetVideoformat() const +{ + return pixelFormat_; +} + +uint32_t VideoConfigParams::GetFrameRate() const +{ + return frameRate_; +} + +uint32_t VideoConfigParams::GetWidth() const +{ + return width_; +} + +uint32_t VideoConfigParams::GetHeight() const +{ + return height_; +} +} // namespace DistributedHardware +} // namespace OHOS -- Gitee