From bf6ce602009676c2d5485a500fdbfa596cfa3feb Mon Sep 17 00:00:00 2001 From: zhuruigan Date: Fri, 19 Jan 2024 15:34:37 +0800 Subject: [PATCH] =?UTF-8?q?=E6=96=B0=E5=A2=9Eextension=E5=A2=9E=E9=87=8F?= =?UTF-8?q?=E5=A4=87=E4=BB=BD=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: zhuruigan Change-Id: Ia5a4d8263027e46fe0bce059e5dcadcae3578948 --- .../native/backup_ext/include/ext_extension.h | 19 + .../native/backup_ext/include/tar_file.h | 4 + .../native/backup_ext/src/ext_extension.cpp | 389 +++++++++++++++++- frameworks/native/backup_ext/src/tar_file.cpp | 30 +- utils/include/b_error/b_error.h | 5 + 5 files changed, 420 insertions(+), 27 deletions(-) diff --git a/frameworks/native/backup_ext/include/ext_extension.h b/frameworks/native/backup_ext/include/ext_extension.h index b822b23f4..2119f6d83 100644 --- a/frameworks/native/backup_ext/include/ext_extension.h +++ b/frameworks/native/backup_ext/include/ext_extension.h @@ -21,7 +21,10 @@ #include #include +#include + #include "b_json/b_json_entity_extension_config.h" +#include "b_json/b_report_entity.h" #include "b_resources/b_constants.h" #include "ext_backup_js.h" #include "ext_extension_stub.h" @@ -102,6 +105,22 @@ private: void AsyncTaskOnBackup(); + int DoIncrementalBackup(const std::map &allFiles, + const std::map &smallFiles, + const std::map &bigFiles, + const std::map &bigInfos); + + void AsyncTaskOnIncrementalBackup(const std::map &allFiles, + const std::map &smallFiles, + const std::map &bigFiles, + const std::map &bigInfos); + + /** + * @brief extension incremental backup restore is done + * + * @param errCode + */ + void AppIncrementalDone(ErrCode errCode); private: std::shared_mutex lock_; std::shared_ptr extension_; diff --git a/frameworks/native/backup_ext/include/tar_file.h b/frameworks/native/backup_ext/include/tar_file.h index 4e1a8a28e..b768511f3 100644 --- a/frameworks/native/backup_ext/include/tar_file.h +++ 
b/frameworks/native/backup_ext/include/tar_file.h @@ -87,6 +87,8 @@ public: const std::string &tarFileName, const std::string &pkPath, TarMap &tarMap); + + void SetPacketMode(bool isSingle); private: TarFile() {} @@ -226,6 +228,8 @@ private: uint32_t tarFileCount_ {0}; std::string currentFileName_ {}; + + bool isSingle_ = false; }; } // namespace OHOS::FileManagement::Backup diff --git a/frameworks/native/backup_ext/src/ext_extension.cpp b/frameworks/native/backup_ext/src/ext_extension.cpp index 0c0c71961..d0501dfcb 100644 --- a/frameworks/native/backup_ext/src/ext_extension.cpp +++ b/frameworks/native/backup_ext/src/ext_extension.cpp @@ -16,9 +16,13 @@ #include "ext_extension.h" #include +#include +#include #include +#include #include #include +#include #include #include @@ -36,8 +40,10 @@ #include "b_error/b_excep_utils.h" #include "b_filesystem/b_dir.h" #include "b_filesystem/b_file.h" +#include "b_filesystem/b_file_hash.h" #include "b_json/b_json_cached_entity.h" #include "b_json/b_json_entity_ext_manage.h" +#include "b_json/b_report_entity.h" #include "b_resources/b_constants.h" #include "b_tarball/b_tarball_factory.h" #include "filemgmt_libhilog.h" @@ -47,14 +53,21 @@ namespace OHOS::FileManagement::Backup { const string DEFAULT_TAR_PKG = "1.tar"; -const string INDEX_FILE_BACKUP = string(BConstants::PATH_BUNDLE_BACKUP_HOME). - append(BConstants::SA_BUNDLE_BACKUP_BACKUP). - append(BConstants::EXT_BACKUP_MANAGE); -const string INDEX_FILE_RESTORE = string(BConstants::PATH_BUNDLE_BACKUP_HOME). - append(BConstants::SA_BUNDLE_BACKUP_RESTORE). 
- append(BConstants::EXT_BACKUP_MANAGE); +const string INDEX_FILE_BACKUP = string(BConstants::PATH_BUNDLE_BACKUP_HOME) + .append(BConstants::SA_BUNDLE_BACKUP_BACKUP) + .append(BConstants::EXT_BACKUP_MANAGE); +const string INDEX_FILE_RESTORE = string(BConstants::PATH_BUNDLE_BACKUP_HOME) + .append(BConstants::SA_BUNDLE_BACKUP_RESTORE) + .append(BConstants::EXT_BACKUP_MANAGE); +const string INDEX_FILE_INCREMENTAL_BACKUP = + string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_BACKUP); using namespace std; +namespace { +const int64_t DEFAULT_SLICE_SIZE = 100 * 1024 * 1024; // 分片文件大小为100M +const uint32_t MAX_FILE_COUNT = 6000; // 单个tar包最多包含6000个文件 +} // namespace + void BackupExtExtension::VerifyCaller() { HILOGI("begin"); @@ -70,7 +83,7 @@ void BackupExtExtension::VerifyCaller() } } -static bool CheckAndCreateDirectory(const string& filePath) +static bool CheckAndCreateDirectory(const string &filePath) { size_t pos = filePath.rfind('/'); if (pos == string::npos) { @@ -316,8 +329,7 @@ static bool IsUserTar(const string &tarFile, const string &indexFile) BJsonCachedEntity cachedEntity(UniqueFd(open(filePath.data(), O_RDONLY))); auto cache = cachedEntity.Structuralize(); auto info = cache.GetExtManageInfo(); - auto iter = find_if(info.begin(), info.end(), - [&tarFile](const auto& item) { return item.hashName == tarFile; }); + auto iter = find_if(info.begin(), info.end(), [&tarFile](const auto &item) { return item.hashName == tarFile; }); if (iter != info.end()) { HILOGI("tarFile:%{public}s isUserTar:%{public}d", tarFile.data(), iter->isUserTar); return iter->isUserTar; @@ -465,7 +477,7 @@ void BackupExtExtension::AsyncTaskBackup(const string config) static void RestoreBigFilesForSpecialCloneCloud(ExtManageInfo item) { - struct stat& sta = item.sta; + struct stat &sta = item.sta; string fileName = item.hashName; if (chmod(fileName.c_str(), sta.st_mode) != 0) { HILOGE("Failed to chmod filePath, err = %{public}d", errno); @@ -527,8 +539,7 @@ static 
ErrCode RestoreFilesForSpecialCloneCloud() return ERR_OK; } -static bool RestoreBigFilePrecheck(string& fileName, const string& path, - const string& hashName, const string& filePath) +static bool RestoreBigFilePrecheck(string &fileName, const string &path, const string &hashName, const string &filePath) { if (filePath.empty()) { HILOGE("file path is empty. %{public}s", filePath.c_str()); @@ -549,8 +560,10 @@ static bool RestoreBigFilePrecheck(string& fileName, const string& path, return true; } -static void RestoreBigFileAfter(const string& fileName, const string& filePath, const struct stat& sta, - const set& lks) +static void RestoreBigFileAfter(const string &fileName, + const string &filePath, + const struct stat &sta, + const set &lks) { if (chmod(filePath.c_str(), sta.st_mode) != 0) { HILOGE("Failed to chmod filePath, err = %{public}d", errno); @@ -612,7 +625,7 @@ static void RestoreBigFiles(bool appendTargetPath) HILOGE("failed to copy the file. err = %{public}d", errno); continue; } - + RestoreBigFileAfter(fileName, filePath, item.sta, cache.GetHardLinkInfo(item.hashName)); } } @@ -663,8 +676,8 @@ void BackupExtExtension::AsyncTaskRestore() } // 恢复用户tar包以及大文件 // 目的地址是否需要拼接path(临时目录),FullBackupOnly为true并且非特殊场景 - bool appendTargetPath = ptr->extension_->UseFullBackupOnly() && - !ptr->extension_->SpeicalVersionForCloneAndCloud(); + bool appendTargetPath = + ptr->extension_->UseFullBackupOnly() && !ptr->extension_->SpeicalVersionForCloneAndCloud(); RestoreBigFiles(appendTargetPath); // delete 1.tar/manage.json @@ -850,6 +863,16 @@ ErrCode BackupExtExtension::HandleRestore() ErrCode BackupExtExtension::HandleIncrementalBackup(UniqueFd incrementalFd, UniqueFd manifestFd) { + string usrConfig = extension_->GetUsrConfig(); + BJsonCachedEntity cachedEntity(usrConfig); + auto cache = cachedEntity.Structuralize(); + if (!cache.GetAllowToBackupRestore()) { + HILOGE("Application does not allow backup or restore"); + return 
BError(BError::Codes::EXT_FORBID_BACKUP_RESTORE, "Application does not allow backup or restore") + .GetCode(); + } + auto [allFiles, smallFiles, bigFiles, bigInfos] = CompareFiles(move(manifestFd), move(incrementalFd)); + AsyncTaskOnIncrementalBackup(allFiles, smallFiles, bigFiles, bigInfos); return 0; } @@ -857,4 +880,336 @@ tuple BackupExtExtension::GetIncrementalBackupFileHandle() { return {UniqueFd(-1), UniqueFd(-1)}; } + +static string GetReportFileName(const string &fileName) +{ + string reportName = fileName + "." + string(BConstants::REPORT_FILE_EXT); + return reportName; +} + +using CompareFilesResult = tuple, + map, + map, + map>; + +static CompareFilesResult CompareFiles(const UniqueFd &cloudFd, const UniqueFd &storageFd) +{ + BReportEntity cloudRp(UniqueFd(cloudFd.Get())); + map cloudFiles = cloudRp.GetReportInfos(); + BReportEntity storageRp(UniqueFd(storageFd.Get())); + map storageFiles = storageRp.GetReportInfos(); + map allFiles = {}; + map smallFiles = {}; + map bigFiles = {}; + map bigInfos = {}; + for (auto &item : storageFiles) { + // 进行文件对比 + string path = item.first; + if (item.second.isIncremental == true && item.second.isDir == true) { + smallFiles.try_emplace(path, item.second); + } + if (item.second.isIncremental == true && item.second.isDir == false) { + auto [res, fileHash] = BFileHash::HashWithSHA256(path); + if (fileHash.empty()) { + continue; + } + item.second.hash = fileHash; + item.second.isIncremental = true; + } else { + item.second.hash = (cloudFiles.find(path) == cloudFiles.end()) ? 
cloudFiles[path].hash : ""; + } + + allFiles.try_emplace(path, item.second); + if (cloudFiles.find(path) == cloudFiles.end() || + (item.second.isDir == false && item.second.isIncremental == true && + cloudFiles.find(path)->second.hash != item.second.hash)) { + // 在云空间简报里不存在或者hash不一致 + if (item.second.size < BConstants::BIG_FILE_BOUNDARY) { + smallFiles.try_emplace(path, item.second); + continue; + } + struct stat sta = {}; + if (stat(path.c_str(), &sta) == -1) { + continue; + } + bigFiles.try_emplace(path, sta); + bigInfos.try_emplace(path, item.second); + } + } + HILOGI("compareFiles Find small files total: %{public}d", smallFiles.size()); + HILOGI("compareFiles Find big files total: %{public}d", bigFiles.size()); + return {allFiles, smallFiles, bigFiles, bigInfos}; +} + +static void WriteFile(const string &filename, const map &srcFiles) +{ + fstream f; + f.open(filename.data(), ios::out); + // 前面2行先填充进去 + f << "version=1.0&attrNum=6" << endl; + f << "path;mode;dir;size;mtime;hash" << endl; + for (auto item : srcFiles) { + struct ReportFileInfo info = item.second; + string str = item.first + ";" + info.mode + ";" + to_string(info.isDir) + ";" + to_string(info.size); + str += ";" + to_string(info.mtime) + ";" + info.hash; + f << str << endl; + } + f.close(); + HILOGI("WriteFile path: %{public}s", filename.c_str()); +} + +/** + * 获取增量的大文件的信息 + */ +static TarMap GetIncrmentBigInfos(const map &files) +{ + auto getStringHash = [](const TarMap &m, const string &str) -> string { + ostringstream strHex; + strHex << hex; + + hash strHash; + size_t szHash = strHash(str); + strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash; + string name = strHex.str(); + for (int i = 0; m.find(name) != m.end(); ++i, strHex.str("")) { + szHash = strHash(str + to_string(i)); + strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash; + name = strHex.str(); + } + + return name; + }; + + TarMap bigFiles; + for (const auto &item : files) { + string md5Name 
= getStringHash(bigFiles, item.first); + if (!md5Name.empty()) { + bigFiles.emplace(md5Name, make_tuple(item.first, item.second, true)); + } + } + + return bigFiles; +} + +/** + * 增量tar包和简报信息回传 + */ +static ErrCode IncrementalTarFileReady(const TarMap &bigFileInfo, + const map &srcFiles, + sptr proxy) +{ + string tarFile = bigFileInfo.begin()->first; + HILOGI("IncrementalTarFileReady: tar: %{public}s", tarFile.c_str()); + string manageFile = GetReportFileName(tarFile); + HILOGI("IncrementalTarFileReady: manageFile: %{public}s", tarFile.c_str()); + string file = string(INDEX_FILE_INCREMENTAL_BACKUP).append(manageFile); + WriteFile(file, srcFiles); + + string tarName = string(INDEX_FILE_INCREMENTAL_BACKUP).append(tarFile); + ErrCode ret = proxy->AppIncrementalFileReady(tarFile, UniqueFd(open(tarName.data(), O_RDONLY)), + UniqueFd(open(file.data(), O_RDONLY))); + if (SUCCEEDED(ret)) { + HILOGI("IncrementalTarFileReady: The application is packaged successfully"); + // 删除文件 + RemoveFile(file); + RemoveFile(tarName); + } else { + HILOGE("IncrementalTarFileReady interface fails to be invoked: %{public}d", ret); + } + return ret; +} + +/** + * 增量大文件和简报信息回传 + */ +static ErrCode IncrementalBigFileReady(const TarMap &pkgInfo, + const map &bigInfos, + sptr proxy) +{ + ErrCode ret {ERR_OK}; + for (auto &item : pkgInfo) { + if (item.first.empty()) { + continue; + } + auto [path, sta, isBeforeTar] = item.second; + + UniqueFd fd(open(path.data(), O_RDONLY)); + if (fd < 0) { + HILOGE("IncrementalBigFileReady open file failed, file name is %{public}s, err = %{public}d", path.c_str(), + errno); + continue; + } + + struct ReportFileInfo info = bigInfos.find(path)->second; + string file = GetReportFileName(string(INDEX_FILE_INCREMENTAL_BACKUP).append(item.first)); + map bigInfo; + bigInfo.try_emplace(path, info); + WriteFile(file, bigInfo); + + ret = proxy->AppIncrementalFileReady(item.first, std::move(fd), UniqueFd(open(file.data(), O_RDONLY))); + if (SUCCEEDED(ret)) { + 
HILOGI("IncrementalBigFileReady:The application is packaged successfully, package name is %{public}s", + item.first.c_str()); + RemoveFile(file); + } else { + HILOGE("IncrementalBigFileReady interface fails to be invoked: %{public}d", ret); + } + } + return ret; +} + +void BackupExtExtension::AsyncTaskOnIncrementalBackup(const map &allFiles, + const map &smallFiles, + const map &bigFiles, + const map &bigInfos) +{ + auto task = [obj {wptr(this)}, allFiles, smallFiles, bigFiles, bigInfos]() { + auto ptr = obj.promote(); + try { + BExcepUltils::BAssert(ptr, BError::Codes::EXT_BROKEN_FRAMEWORK, + "Ext extension handle have been already released"); + BExcepUltils::BAssert(ptr->extension_, BError::Codes::EXT_INVAL_ARG, + "extension handle have been already released"); + + auto ret = ptr->DoIncrementalBackup(allFiles, smallFiles, bigFiles, bigInfos); + ptr->AppIncrementalDone(ret); + HILOGE("Incremental backup app done %{public}d", ret); + } catch (const BError &e) { + ptr->AppIncrementalDone(e.GetCode()); + } catch (const exception &e) { + HILOGE("Catched an unexpected low-level exception %{public}s", e.what()); + ptr->AppIncrementalDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode()); + } catch (...) { + HILOGE("Failed to restore the ext bundle"); + ptr->AppIncrementalDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode()); + } + }; + + threadPool_.AddTask([task]() { + try { + task(); + } catch (...) 
{ + HILOGE("Failed to add task to thread pool"); + } + }); +} + +static string GetIncrmentPartName() +{ + auto now = chrono::system_clock::now(); + auto duration = now.time_since_epoch(); + auto milliseconds = chrono::duration_cast(duration); + + return to_string(milliseconds.count()) + "_part"; +} + +static void IncrementalPacket(const map &infos, TarMap &tar, sptr proxy) +{ + HILOGI("IncrementalPacket begin"); + string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_BACKUP); + int64_t totalSize = 0; + uint32_t fileCount = 0; + vector packFiles; + map tarInfos; + // 设置下打包模式 + TarFile::GetInstance().SetPacketMode(true); + string partName = GetIncrmentPartName(); + for (auto small : infos) { + totalSize += small.second.size; + fileCount += 1; + packFiles.emplace_back(small.first); + tarInfos.try_emplace(small.first, small.second); + if (totalSize >= DEFAULT_SLICE_SIZE || fileCount >= MAX_FILE_COUNT) { + TarMap tarMap {}; + TarFile::GetInstance().Packet(packFiles, partName, path, tarMap); + tar.insert(tarMap.begin(), tarMap.end()); + // 执行tar包回传功能 + IncrementalTarFileReady(tarMap, tarInfos, proxy); + totalSize = 0; + fileCount = 0; + packFiles.clear(); + tarInfos.clear(); + } + } + if (fileCount > 0) { + // 打包回传 + TarMap tarMap {}; + TarFile::GetInstance().Packet(packFiles, partName, path, tarMap); + IncrementalTarFileReady(tarMap, tarInfos, proxy); + tar.insert(tarMap.begin(), tarMap.end()); + packFiles.clear(); + tarInfos.clear(); + } +} + +static ErrCode IncrementalAllFileReady(const TarMap &pkgInfo, + const map &srcFiles, + sptr proxy) +{ + BJsonCachedEntity cachedEntity( + UniqueFd(open(INDEX_FILE_BACKUP.data(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR))); + auto cache = cachedEntity.Structuralize(); + cache.SetExtManage(pkgInfo); + cachedEntity.Persist(); + close(cachedEntity.GetFd().Release()); + + string file = GetReportFileName(string(INDEX_FILE_INCREMENTAL_BACKUP).append("all")); + WriteFile(file, srcFiles); + UniqueFd 
fd(open(INDEX_FILE_BACKUP.data(), O_RDONLY)); + UniqueFd manifestFd(open(file.data(), O_RDONLY)); + ErrCode ret = + proxy->AppIncrementalFileReady(string(BConstants::EXT_BACKUP_MANAGE), std::move(fd), std::move(manifestFd)); + if (SUCCEEDED(ret)) { + HILOGI("IncrementalAllFileReady successfully"); + RemoveFile(file); + } else { + HILOGI("successfully but the IncrementalAllFileReady interface fails to be invoked: %{public}d", ret); + } + return ret; +} + +int BackupExtExtension::DoIncrementalBackup(const map &allFiles, + const map &smallFiles, + const map &bigFiles, + const map &bigInfos) +{ + HILOGI("Do increment backup"); + if (extension_->GetExtensionAction() != BConstants::ExtensionAction::BACKUP) { + return EPERM; + } + + string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_BACKUP); + if (mkdir(path.data(), S_IRWXU) && errno != EEXIST) { + throw BError(errno); + } + + auto proxy = ServiceProxy::GetInstance(); + if (proxy == nullptr) { + throw BError(BError::Codes::EXT_BROKEN_BACKUP_SA, std::generic_category().message(errno)); + } + // 获取增量文件和全量数据 + if (smallFiles.size() == 0 && bigFiles.size() == 0) { + // 没有增量,则不需要上传 + TarMap tMap; + IncrementalAllFileReady(tMap, allFiles, proxy); + HILOGI("Do increment backup, IncrementalAllFileReady end, file empty"); + return ERR_OK; + } + + // tar包数据 + TarMap tarMap; + IncrementalPacket(smallFiles, tarMap, proxy); + HILOGI("Do increment backup, IncrementalPacket end"); + + // 最后回传大文件 + TarMap bigMap = GetIncrmentBigInfos(bigFiles); + IncrementalBigFileReady(bigMap, bigInfos, proxy); + HILOGI("Do increment backup, IncrementalBigFileReady end"); + bigMap.insert(tarMap.begin(), tarMap.end()); + + // 回传manage.json和全量文件 + IncrementalAllFileReady(bigMap, allFiles, proxy); + HILOGI("Do increment backup, IncrementalAllFileReady end"); + return ERR_OK; +} } // namespace OHOS::FileManagement::Backup diff --git a/frameworks/native/backup_ext/src/tar_file.cpp 
b/frameworks/native/backup_ext/src/tar_file.cpp index 80de4451c..c1cb79bb3 100644 --- a/frameworks/native/backup_ext/src/tar_file.cpp +++ b/frameworks/native/backup_ext/src/tar_file.cpp @@ -23,6 +23,7 @@ #include #include +#include "b_error/b_error.h" #include "b_resources/b_constants.h" #include "directory_ex.h" #include "filemgmt_libhilog.h" @@ -62,10 +63,7 @@ bool TarFile::Packet(const vector &srcFiles, const string &tarFileName, packagePath_ = packagePath_.substr(0, packagePath_.length() - 1); } - if (!CreateSplitTarFile()) { - HILOGE("Failed to create split tar file"); - return false; - } + CreateSplitTarFile(); size_t index = 0; for (auto &filePath : srcFiles) { @@ -81,9 +79,8 @@ bool TarFile::Packet(const vector &srcFiles, const string &tarFileName, } } - if (!FillSplitTailBlocks()) { - HILOGE("Failed to fill split tail blocks"); - } + FillSplitTailBlocks(); + tarMap = tarMap_; if (currentTarFile_ != nullptr) { @@ -109,13 +106,14 @@ bool TarFile::TraversalFile(string &filePath) } if (!AddFile(filePath, curFileStat, false)) { HILOGE("Failed to add file to tar package"); - return false; + throw BError(BError::Codes::EXT_BACKUP_PACKET_ERROR, "TraversalFile Failed to add file to tar package"); } if (currentTarFileSize_ >= DEFAULT_SLICE_SIZE) { fileCount_ = 0; FillSplitTailBlocks(); CreateSplitTarFile(); + return true; } // tar包内文件数量大于6000,分片打包 @@ -282,7 +280,7 @@ bool TarFile::CreateSplitTarFile() currentTarFile_ = fopen(currentTarName_.c_str(), "wb+"); if (currentTarFile_ == nullptr) { HILOGE("Failed to open file %{public}s, err = %{public}d", currentTarName_.c_str(), errno); - return false; + throw BError(BError::Codes::EXT_BACKUP_PACKET_ERROR, "CreateSplitTarFile Failed to open file"); } currentTarFileSize_ = 0; @@ -306,7 +304,7 @@ bool TarFile::FillSplitTailBlocks() int ret = stat(currentTarName_.c_str(), &staTar); if (ret != 0) { HILOGE("Failed to stat file %{public}s, err = %{public}d", currentTarName_.c_str(), errno); - return false; + throw 
BError(BError::Codes::EXT_BACKUP_PACKET_ERROR, "FillSplitTailBlocks Failed to stat file"); } if (staTar.st_size == 0 && tarFileCount_ > 0 && currentTarFile_ != nullptr) { fclose(currentTarFile_); @@ -319,6 +317,11 @@ bool TarFile::FillSplitTailBlocks() vector buff {}; buff.resize(BLOCK_SIZE); WriteAll(buff, END_BLOCK_SIZE); + + if (isSingle_) { + tarMap_.clear(); + } + tarMap_.emplace(tarFileName_, make_tuple(currentTarName_, staTar, false)); fflush(currentTarFile_); @@ -477,4 +480,11 @@ bool TarFile::WriteLongName(string &name, char type) return CompleteBlock(sz); } +/** + * Set the packing mode; when isSingle is true, tarMap_ is cleared before each new slice is recorded, so only the current slice is reported. +*/ +void TarFile::SetPacketMode(bool isSingle) +{ + isSingle_ = isSingle; +} } // namespace OHOS::FileManagement::Backup \ No newline at end of file diff --git a/utils/include/b_error/b_error.h b/utils/include/b_error/b_error.h index cc0ff5805..36a979ed7 100644 --- a/utils/include/b_error/b_error.h +++ b/utils/include/b_error/b_error.h @@ -85,6 +85,7 @@ public: EXT_ABILITY_DIED = 0x5004, EXT_ABILITY_TIMEOUT = 0x5005, EXT_FORBID_BACKUP_RESTORE = 0x5006, + EXT_BACKUP_PACKET_ERROR = 0x5007, }; enum BackupErrorCode { @@ -100,6 +101,7 @@ public: E_ETO = 13500003, E_DIED = 13500004, E_EMPTY = 13500005, + E_PACKET = 13500006, }; public: @@ -220,6 +222,7 @@ private: {Codes::EXT_ABILITY_TIMEOUT, "Extension process timeout"}, {Codes::EXT_ABILITY_DIED, "Extension process died"}, {Codes::EXT_FORBID_BACKUP_RESTORE, "forbid backup or restore"}, + {Codes::EXT_BACKUP_PACKET_ERROR, "Backup packet error"}, }; static inline const std::map errCodeTable_ { @@ -247,6 +250,7 @@ private: {static_cast(Codes::EXT_ABILITY_DIED), BackupErrorCode::E_DIED}, {static_cast(Codes::EXT_ABILITY_TIMEOUT), BackupErrorCode::E_ETO}, {static_cast(Codes::EXT_FORBID_BACKUP_RESTORE), BackupErrorCode::E_FORBID}, + {static_cast(Codes::EXT_BACKUP_PACKET_ERROR), BackupErrorCode::E_PACKET}, {BackupErrorCode::E_IPCSS, BackupErrorCode::E_IPCSS}, {BackupErrorCode::E_INVAL, BackupErrorCode::E_INVAL}, {BackupErrorCode::E_UKERR, 
BackupErrorCode::E_UKERR}, @@ -259,6 +263,7 @@ private: {BackupErrorCode::E_ETO, BackupErrorCode::E_ETO}, {BackupErrorCode::E_DIED, BackupErrorCode::E_DIED}, {BackupErrorCode::E_EMPTY, BackupErrorCode::E_EMPTY}, + {BackupErrorCode::E_PACKET, BackupErrorCode::E_PACKET}, }; private: -- Gitee