diff --git a/frameworks/native/backup_ext/BUILD.gn b/frameworks/native/backup_ext/BUILD.gn
index ec99742a2db2cd5ce9fa3e6b36b69f3b673a3b68..ce6381303c63f30c1522a38c77ffe09d21dfb46c 100644
--- a/frameworks/native/backup_ext/BUILD.gn
+++ b/frameworks/native/backup_ext/BUILD.gn
@@ -21,6 +21,8 @@ ohos_shared_library("backup_extension_ability_native") {
     "src/ext_backup_loader.cpp",
     "src/ext_extension.cpp",
     "src/ext_extension_stub.cpp",
+    "src/tar_file.cpp",
+    "src/untar_file.cpp",
   ]
 
   defines = [
diff --git a/frameworks/native/backup_ext/src/ext_extension.cpp b/frameworks/native/backup_ext/src/ext_extension.cpp
index 485f0b64346c3eac8320ad221c9341a9df502bc8..a2ce1fd81dab0c574d9171feeb7e70b430e38b85 100644
--- a/frameworks/native/backup_ext/src/ext_extension.cpp
+++ b/frameworks/native/backup_ext/src/ext_extension.cpp
@@ -42,6 +42,8 @@
 #include "b_tarball/b_tarball_factory.h"
 #include "filemgmt_libhilog.h"
 #include "service_proxy.h"
+#include "tar_file.h"
+#include "untar_file.h"
 
 namespace OHOS::FileManagement::Backup {
 const string DEFAULT_TAR_PKG = "1.tar";
@@ -111,7 +113,7 @@ ErrCode BackupExtExtension::HandleClear()
     return ERR_OK;
 }
 
-static ErrCode IndexFileReady(const map<string, tuple<string, struct stat, bool>> &pkgInfo, sptr<IService> proxy)
+static ErrCode IndexFileReady(const TarMap &pkgInfo, sptr<IService> proxy)
 {
     BJsonCachedEntity<BJsonEntityExtManage> cachedEntity(
         UniqueFd(open(INDEX_FILE_BACKUP.data(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)));
@@ -235,15 +237,14 @@ static bool IsUserTar(const string &tarFile, const string &indexFile)
     return false;
 }
 
-static map<string, tuple<string, struct stat, bool>> GetBigFileInfo(const vector<string> &includes,
-                                                                    const vector<string> &excludes)
+static pair<TarMap, vector<string>> GetFileInfos(const vector<string> &includes, const vector<string> &excludes)
 {
-    auto [errCode, files] = BDir::GetBigFiles(includes, excludes);
+    auto [errCode, files, smallFiles] = BDir::GetBigFiles(includes, excludes);
     if (errCode != 0) {
         return {};
     }
 
-    auto GetStringHash = [](const map<string, tuple<string, struct stat, bool>> &m, const string &str) -> string {
+    auto GetStringHash = [](const TarMap &m, const string &str) -> string {
         ostringstream strHex;
         strHex << hex;
 
@@ -260,7 +261,7 @@
         return name;
     };
 
-    map<string, tuple<string, struct stat, bool>> bigFiles;
+    TarMap bigFiles;
     for (const auto &item : files) {
         string md5Name = GetStringHash(bigFiles, item.first);
         if (!md5Name.empty()) {
@@ -268,7 +269,7 @@
         }
     }
 
-    return bigFiles;
+    return {bigFiles, smallFiles};
 }
 
 int BackupExtExtension::DoBackup(const BJsonEntityExtensionConfig &usrConfig)
@@ -287,7 +288,7 @@ int BackupExtExtension::DoBackup(const BJsonEntityExtensionConfig &usrConfig)
     vector<string> excludes = usrConfig.GetExcludes();
 
     // Handle big files
-    auto bigFileInfo = GetBigFileInfo(includes, excludes);
+    auto [bigFileInfo, smallFiles] = GetFileInfos(includes, excludes);
     for (const auto &item : bigFileInfo) {
         auto filePath = std::get<0>(item.second);
         if (!filePath.empty()) {
@@ -295,18 +296,10 @@
         }
     }
 
-    string tarName = path + DEFAULT_TAR_PKG;
-    string root = "/";
-
-    // Pack
-    auto tarballTar = BTarballFactory::Create("cmdline", tarName);
-    (tarballTar->tar)(root, {includes.begin(), includes.end()}, {excludes.begin(), excludes.end()});
-
-    struct stat sta = {};
-    if (stat(tarName.data(), &sta) == -1) {
-        HILOGE("failed to invoke the system function stat, %{public}s", tarName.c_str());
-    }
-    bigFileInfo.emplace(DEFAULT_TAR_PKG, make_tuple(tarName, sta, false));
+    // Chunked packing
+    TarMap tarMap {};
+    TarFile::GetInstance().Packet(smallFiles, "part", path, tarMap);
+    bigFileInfo.insert(tarMap.begin(), tarMap.end());
 
     auto proxy = ServiceProxy::GetInstance();
     if (proxy == nullptr) {
@@ -331,14 +324,7 @@ int BackupExtExtension::DoRestore(const string &fileName)
     // REM: Extract the data in the backup directory that was already mounted when the Extension started
     string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_RESTORE);
     string tarName = path + fileName;
-    HILOGI("tarName:%{public}s, fileName:%{public}s", tarName.c_str(), fileName.c_str());
-
-    auto tarballFunc = BTarballFactory::Create("cmdline", tarName);
-    if (extension_->WasFromSpeicalVersion() || extension_->UseFullBackupOnly()) {
-        (tarballFunc->untar)(path);
-    } else {
-        (tarballFunc->untar)("/");
-    }
+    UntarFile::GetInstance().UnPacket(tarName, "/");
 
     HILOGI("Application recovered successfully, package path is %{public}s", tarName.c_str());
     return ERR_OK;
@@ -433,13 +419,17 @@ static void RestoreBigFiles()
             HILOGE("file path is empty. %{public}s", filePath.c_str());
             continue;
         }
-        if (rename(fileName.data(), filePath.data()) != 0) {
-            HILOGE("failed to rename the file, try to copy it. err = %{public}d", errno);
-            if (!BFile::CopyFile(fileName, filePath)) {
-                HILOGE("failed to copy the file. err = %{public}d", errno);
-                continue;
+        if (!BFile::CopyFile(fileName, filePath)) {
+            HILOGE("failed to copy the file. err = %{public}d", errno);
+            continue;
+        }
+        if (chmod(filePath.c_str(), item.sta.st_mode) != 0) {
+            HILOGE("Failed to chmod %{public}s, err = %{public}d", filePath.c_str(), errno);
+        }
+        if (fileName != filePath) {
+            if (!RemoveFile(fileName)) {
+                HILOGE("Failed to delete the big file %{public}s", fileName.c_str());
             }
-            HILOGI("succeed to rename or copy the file");
         }
         set<string> lks = cache.GetHardLinkInfo(item.hashName);
         for (const auto &lksPath : lks) {
@@ -459,15 +449,21 @@
 static void DeleteBackupTars()
 {
     // The directory includes tars and manage.json, which will be deleted
-    string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_RESTORE);
-    string tarPath = path + DEFAULT_TAR_PKG;
-    string indexPath = path + string(BConstants::EXT_BACKUP_MANAGE);
-    if (!RemoveFile(tarPath)) {
-        HILOGE("Failed to delete the backup tar %{public}s", tarPath.c_str());
+    BJsonCachedEntity<BJsonEntityExtManage> cachedEntity(UniqueFd(open(INDEX_FILE_RESTORE.data(), O_RDONLY)));
+    auto cache = cachedEntity.Structuralize();
+    auto info = cache.GetExtManage();
+    auto path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_RESTORE);
+    for (auto &item : info) {
+        if (ExtractFileExt(item) != "tar" || IsUserTar(item, INDEX_FILE_RESTORE)) {
+            continue;
+        }
+        string tarPath = path + item;
+        if (!RemoveFile(tarPath)) {
+            HILOGE("Failed to delete the backup tar %{public}s", tarPath.c_str());
+        }
     }
-
-    if (!RemoveFile(indexPath)) {
-        HILOGE("Failed to delete the backup index %{public}s", indexPath.c_str());
+    if (!RemoveFile(INDEX_FILE_RESTORE)) {
+        HILOGE("Failed to delete the backup index %{public}s", INDEX_FILE_RESTORE.c_str());
     }
 }
 
@@ -484,7 +480,7 @@ void BackupExtExtension::AsyncTaskRestore()
 
     // Extract
     int ret = ERR_OK;
-    for (auto item : tars) { // handle the tar files to be extracted
+    for (auto item : tars) {
         if (ExtractFileExt(item) == "tar" && !IsUserTar(item, INDEX_FILE_RESTORE)) {
             ret = ptr->DoRestore(item);
         }
diff --git a/tests/unittests/backup_utils/b_filesystem/b_dir_test.cpp b/tests/unittests/backup_utils/b_filesystem/b_dir_test.cpp
index e0c78540b4990b489415cfd43a22ccb9051914a9..f89595510d083390afd03c5fb04b736597362cc3 100644
--- a/tests/unittests/backup_utils/b_filesystem/b_dir_test.cpp
+++ b/tests/unittests/backup_utils/b_filesystem/b_dir_test.cpp
@@ -104,7 +104,7 @@ HWTEST_F(BDirTest, b_dir_GetBigFiles_0100, testing::ext::TestSize.Level1)
     EXPECT_EQ(ret, 0);
     vector<string> includes = {rootDir};
     vector<string> excludes = {filePath2};
-    auto [errCode, mpNameToStat] = BDir::GetBigFiles(includes, excludes);
+    auto [errCode, mpNameToStat, smallFiles] = BDir::GetBigFiles(includes, excludes);
     EXPECT_EQ(errCode, ERR_OK);
     EXPECT_EQ(mpNameToStat.at(filePath1).st_size, 1024 * 1024 * 1025);
     EXPECT_EQ(mpNameToStat.find(filePath2), mpNameToStat.end());
@@ -130,7 +130,7 @@ HWTEST_F(BDirTest, b_dir_GetBigFiles_0200, testing::ext::TestSize.Level1)
     try {
         vector<string> includes = {{}, {}};
         vector<string> excludes = {{}};
-        auto [errCode, mpNameToStat] = BDir::GetBigFiles(includes, excludes);
+        auto [errCode, mpNameToStat, smallFiles] = BDir::GetBigFiles(includes, excludes);
         EXPECT_EQ(errCode, ERR_OK);
     } catch (...) {
         EXPECT_TRUE(false);
@@ -171,7 +171,7 @@ HWTEST_F(BDirTest, b_dir_GetBigFiles_0300, testing::ext::TestSize.Level1)
         system(touchFilePrefix.append("c.txt").c_str());
         vector<string> includes = {preparedDir + string("/*"), preparedDir + string("test")};
         vector<string> excludes = {preparedDir + string("/test/test1/test2"), {}};
-        auto [errCode, mpNameToStat] = BDir::GetBigFiles(includes, excludes);
+        auto [errCode, mpNameToStat, smallFiles] = BDir::GetBigFiles(includes, excludes);
         EXPECT_EQ(errCode, ERR_OK);
     } catch (...) {
         EXPECT_TRUE(false);
diff --git a/utils/include/b_filesystem/b_dir.h b/utils/include/b_filesystem/b_dir.h
index 27762942eef16c76ac587551b51292ab580658a6..753529cb63e2eb48567c061412314a7ceb966072 100644
--- a/utils/include/b_filesystem/b_dir.h
+++ b/utils/include/b_filesystem/b_dir.h
@@ -46,8 +46,8 @@ public:
     * @param excludes Collection of files and directories to exclude
     * @return Error code and the collection of big-file names
     */
-    static std::pair<ErrCode, std::map<std::string, struct stat>> GetBigFiles(const std::vector<std::string> &includes,
-                                                                              const std::vector<std::string> &excludes);
+    static std::tuple<ErrCode, std::map<std::string, struct stat>, std::vector<std::string>> GetBigFiles(
+        const std::vector<std::string> &includes, const std::vector<std::string> &excludes);
 
     /**
     * @brief Get the Dirs object
diff --git a/utils/include/b_json/b_json_entity_caps.h b/utils/include/b_json/b_json_entity_caps.h
index db51d4270fed89cbd12bf797ba0f7770f43b08de..e52dd173b16f5fbcadde0e1c41cede18d445582b 100644
--- a/utils/include/b_json/b_json_entity_caps.h
+++ b/utils/include/b_json/b_json_entity_caps.h
@@ -110,7 +110,6 @@ public:
             string restoreDeps("");
             if (item.isMember("restoreDeps") && item["restoreDeps"].isString()) {
                 restoreDeps = item["restoreDeps"].asString();
-                HILOGI("restoreDeps is %{public}s", restoreDeps.data());
             }
             bundleInfos.emplace_back(BundleInfo {item["name"].asString(), item["versionCode"].asUInt(),
                                                  item["versionName"].asString(), item["spaceOccupied"].asInt64(),
diff --git a/utils/include/b_resources/b_constants.h b/utils/include/b_resources/b_constants.h
index e9adc535625af4fa1b460fd7797b10b57a1a3066..70c2ed6da950429c94f86a8cd223fdecd33b3b72 100644
--- a/utils/include/b_resources/b_constants.h
+++ b/utils/include/b_resources/b_constants.h
@@ -62,8 +62,8 @@ constexpr int DECIMAL_BASE = 10; // Decimal base
 constexpr int HEADER_SIZE = 512;         // Size of the Header struct at the head of a packed file
 constexpr int BLOCK_SIZE = 512;          // Upper limit of the zero-filled block appended after a packed file's data segment
 constexpr int BLOCK_PADDING_SIZE = 1024; // Size of the zero-filled block appended at the end of a packed file
-constexpr off_t BIG_FILE_BOUNDARY = 1024 * 1024 * 1024; // Big-file boundary
-constexpr unsigned long BIG_FILE_NAME_SIZE = 16;        // Length of big-file names (hashed)
+constexpr off_t BIG_FILE_BOUNDARY = 2 * 1024 * 1024; // Big-file boundary
+constexpr unsigned long BIG_FILE_NAME_SIZE = 16;     // Length of big-file names (hashed)
 
 constexpr int PATHNAME_MAX_SIZE = 100; // Array/string size of each field in the packed-file Header struct.
 constexpr int MODE_MAX_SIZE = 8;
diff --git a/utils/src/b_filesystem/b_dir.cpp b/utils/src/b_filesystem/b_dir.cpp
index 045a96ce51515abcc7fee1c133e695205c2e2a06..cbfc884c80c06786ece88cc3a4fce3e9e22eeb2e 100644
--- a/utils/src/b_filesystem/b_dir.cpp
+++ b/utils/src/b_filesystem/b_dir.cpp
@@ -35,13 +35,40 @@
 namespace OHOS::FileManagement::Backup {
 using namespace std;
 
-static pair<ErrCode, map<string, struct stat>> GetDirFilesDetail(const string &path, bool recursion, off_t size = -1)
+static bool IsEmptyDirectory(const string &path)
+{
+    DIR *dir = opendir(path.c_str());
+    if (dir == nullptr) {
+        return false;
+    }
+    bool isEmpty = true;
+    struct dirent *entry;
+    while ((entry = readdir(dir)) != nullptr) {
+        if (entry->d_type != DT_DIR || (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0)) {
+            isEmpty = false;
+            break;
+        }
+    }
+    closedir(dir);
+    return isEmpty;
+}
+
+static tuple<ErrCode, map<string, struct stat>, vector<string>> GetDirFilesDetail(const string &path,
+                                                                                  bool recursion,
+                                                                                  off_t size = -1)
 {
     map<string, struct stat> files;
+    vector<string> smallFiles;
+
+    if (IsEmptyDirectory(path)) {
+        smallFiles.emplace_back(path);
+        return {BError(BError::Codes::OK).GetCode(), files, smallFiles};
+    }
+
     unique_ptr<DIR, function<void(DIR *)>> dir = {opendir(path.c_str()), closedir};
     if (!dir) {
         HILOGE("Invalid directory path: %{private}s", path.c_str());
-        return {BError(errno).GetCode(), files};
+        return {BError(errno).GetCode(), files, smallFiles};
     }
 
     struct dirent *ptr = nullptr;
@@ -54,12 +81,13 @@ static pair<ErrCode, map<string, struct stat>> GetDirFilesDetail(const string &p
             continue;
         }
 
-        auto [errCode, subfiles] =
+        auto [errCode, subFiles, subSmallFiles] =
             GetDirFilesDetail(IncludeTrailingPathDelimiter(path) + string(ptr->d_name), recursion, size);
         if (errCode != 0) {
-            return {errCode, files};
+            return {errCode, files, smallFiles};
         }
-        files.merge(subfiles);
+        files.merge(subFiles);
+        smallFiles.insert(smallFiles.end(), subSmallFiles.begin(), subSmallFiles.end());
     } else if (ptr->d_type == DT_LNK) {
         continue;
     } else {
@@ -69,6 +97,8 @@
             continue;
         }
         if (sta.st_size < size) {
+            HILOGI("Find small file %{public}s", fileName.data());
+            smallFiles.emplace_back(fileName);
             continue;
         }
         HILOGI("Find big file");
@@ -76,7 +106,7 @@
         }
     }
 
-    return {BError(BError::Codes::OK).GetCode(), files};
+    return {BError(BError::Codes::OK).GetCode(), files, smallFiles};
 }
 
 tuple<ErrCode, vector<string>> BDir::GetDirFiles(const string &path)
@@ -136,19 +166,23 @@ static set<string> ExpandPathWildcard(const vector<string> &vec)
     return filteredPath;
 }
 
-pair<ErrCode, map<string, struct stat>> BDir::GetBigFiles(const vector<string> &includes,
-                                                          const vector<string> &excludes)
+tuple<ErrCode, map<string, struct stat>, vector<string>> BDir::GetBigFiles(const vector<string> &includes,
+                                                                           const vector<string> &excludes)
 {
     set<string> inc = ExpandPathWildcard(includes);
 
     map<string, struct stat> incFiles;
+    vector<string> incSmallFiles;
     for (const auto &item : inc) {
-        auto [errCode, files] =
+        auto [errCode, files, smallFiles] =
             OHOS::FileManagement::Backup::GetDirFilesDetail(item, true, BConstants::BIG_FILE_BOUNDARY);
         if (errCode == 0) {
            int32_t num = static_cast<int32_t>(files.size());
            HILOGI("found big files. total number is : %{public}d", num);
            incFiles.merge(move(files));
+
+           HILOGI("found small files. total number is : %{public}d", static_cast<int32_t>(smallFiles.size()));
+           incSmallFiles.insert(incSmallFiles.end(), smallFiles.begin(), smallFiles.end());
        }
     }
 
@@ -172,9 +206,9 @@
             bigFiles[item.first] = item.second;
         }
     }
-    int32_t num = static_cast<int32_t>(bigFiles.size());
-    HILOGI("total number of big files is %{public}d", num);
-    return {ERR_OK, move(bigFiles)};
+    HILOGI("total number of big files is %{public}d", static_cast<int32_t>(bigFiles.size()));
+    HILOGI("total number of small files is %{public}d", static_cast<int32_t>(incSmallFiles.size()));
+    return {ERR_OK, move(bigFiles), move(incSmallFiles)};
 }
 
 vector<string> BDir::GetDirs(const vector<string> &paths)
diff --git a/utils/src/b_json/b_json_entity_ext_manage.cpp b/utils/src/b_json/b_json_entity_ext_manage.cpp
index bff04e1d71d86cc0fe532ac6e516646d2c5b4b24..5b9f3e2f3b57f148cd02d44318006cebb941d253 100644
--- a/utils/src/b_json/b_json_entity_ext_manage.cpp
+++ b/utils/src/b_json/b_json_entity_ext_manage.cpp
@@ -58,6 +58,7 @@ Json::Value Stat2JsonValue(struct stat sta)
     Json::Value value;
 
     value["st_size"] = static_cast<int64_t>(sta.st_size);
+    value["st_mode"] = static_cast<int32_t>(sta.st_mode);
     value["st_atim"]["tv_sec"] = static_cast<int64_t>(sta.st_atim.tv_sec);
     value["st_atim"]["tv_nsec"] = static_cast<int64_t>(sta.st_atim.tv_nsec);
     value["st_mtim"]["tv_sec"] = static_cast<int64_t>(sta.st_mtim.tv_sec);
@@ -75,6 +76,7 @@ struct stat JsonValue2Stat(const Json::Value &value)
     }
 
     sta.st_size = value.isMember("st_size") && value["st_size"].isInt64() ? value["st_size"].asInt64() : 0;
+    sta.st_mode = value.isMember("st_mode") && value["st_mode"].isInt() ? value["st_mode"].asInt() : 0;
    if (value.isMember("st_atim")) {
        sta.st_atim.tv_sec = value["st_atim"].isMember("tv_sec") && value["st_atim"]["tv_sec"].isInt64()
                                 ? value["st_atim"]["tv_sec"].asInt64()
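
Reviewer note (illustration, not part of the patch): the behavioural core of this change is that BDir::GetBigFiles now returns a third element, the list of small files, and that BConstants::BIG_FILE_BOUNDARY drops from 1 GiB to 2 MiB, so most files are routed into chunked "part" tars instead of one monolithic 1.tar. The standalone sketch below mimics the size split that GetDirFilesDetail performs for a single directory level; SplitBySize and kBigFileBoundary are hypothetical names, and the real function additionally recurses into subdirectories, skips symlinks, and records empty directories in the small-file list.

// Sketch only: compiles on its own, does not use BDir/BError/HILOG from the patch.
#include <dirent.h>
#include <sys/stat.h>
#include <cerrno>
#include <map>
#include <string>
#include <tuple>
#include <vector>

// Mirrors BConstants::BIG_FILE_BOUNDARY after this change (2 MiB instead of 1 GiB).
constexpr off_t kBigFileBoundary = 2 * 1024 * 1024;

// Splits one directory level the way GetDirFilesDetail now does: entries at or above
// the boundary are kept with their stat info, smaller ones are collected by name only.
std::tuple<int, std::map<std::string, struct stat>, std::vector<std::string>>
SplitBySize(const std::string &path)
{
    std::map<std::string, struct stat> bigFiles;
    std::vector<std::string> smallFiles;

    DIR *dir = opendir(path.c_str());
    if (dir == nullptr) {
        return {errno, bigFiles, smallFiles};
    }
    for (struct dirent *ptr = readdir(dir); ptr != nullptr; ptr = readdir(dir)) {
        if (ptr->d_type != DT_REG) {
            continue; // the real code also recurses into directories and skips symlinks
        }
        std::string fileName = path + "/" + ptr->d_name;
        struct stat sta {};
        if (stat(fileName.c_str(), &sta) == -1) {
            continue; // unreadable entry: skipped, as in the original loop
        }
        if (sta.st_size < kBigFileBoundary) {
            smallFiles.emplace_back(fileName); // candidate for a chunked "part" tar
        } else {
            bigFiles.try_emplace(fileName, sta); // transferred individually as a big file
        }
    }
    closedir(dir);
    return {0, bigFiles, smallFiles};
}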
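
A second illustrative point: manage.json now appears to record st_mode for every entry (Stat2JsonValue/JsonValue2Stat), and RestoreBigFiles switches from rename to copy, re-applies the recorded mode, and deletes the staged copy. The hypothetical helper below shows that order using std::filesystem instead of the project's BFile::CopyFile/RemoveFile utilities; it is a sketch of the restore sequence, not the extension's actual code.

// Sketch only: CopyRestoreAndFixMode is an invented name for illustration.
#include <sys/stat.h>
#include <filesystem>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// recordedMode corresponds to the st_mode value that manage.json now stores per file.
bool CopyRestoreAndFixMode(const std::string &src, const std::string &dst, mode_t recordedMode)
{
    std::error_code ec;
    fs::copy_file(src, dst, fs::copy_options::overwrite_existing, ec);
    if (ec) {
        return false; // the patched code logs errno and moves on to the next file
    }
    if (chmod(dst.c_str(), recordedMode) != 0) {
        // non-fatal in the patch: the chmod failure is only logged
    }
    if (src != dst) {
        fs::remove(src, ec); // the staged copy in the restore directory is cleaned up
    }
    return true;
}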