diff --git a/frameworks/native/backup_ext/src/ext_incremental_backup_extension.cpp b/frameworks/native/backup_ext/src/ext_incremental_backup_extension.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..caf300893fcc8886633f3db487daf7956f37046b
--- /dev/null
+++ b/frameworks/native/backup_ext/src/ext_incremental_backup_extension.cpp
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ext_extension.h"
+
+// NOTE(review): the targets of the angle-bracket includes were lost in the
+// source dump; the list below is reconstructed from usage — verify.
+#include <chrono>
+#include <fstream>
+#include <functional>
+#include <iomanip>
+#include <map>
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "accesstoken_kit.h"
+#include "bundle_mgr_client.h"
+#include "errors.h"
+#include "ipc_skeleton.h"
+
+#include "b_error/b_error.h"
+#include "b_error/b_excep_utils.h"
+#include "b_filesystem/b_dir.h"
+#include "b_filesystem/b_file.h"
+#include "b_filesystem/b_file_hash.h"
+#include "b_json/b_json_cached_entity.h"
+#include "b_json/b_json_entity_ext_manage.h"
+#include "b_resources/b_constants.h"
+#include "b_json/b_report_entity.h"
+#include "b_tarball/b_tarball_factory.h"
+#include "filemgmt_libhilog.h"
+#include "service_proxy.h"
+#include "tar_file.h"
+#include "untar_file.h"
+
+namespace OHOS::FileManagement::Backup {
+// FIX: `using namespace std;` must precede the constants below, which already
+// use unqualified `string` (it was originally placed after them).
+using namespace std;
+
+// manage.json index paths for full backup / restore, and the staging directory
+// for incremental-backup artifacts.
+const string INDEX_FILE_BACKUP = string(BConstants::PATH_BUNDLE_BACKUP_HOME).
+                                     append(BConstants::SA_BUNDLE_BACKUP_BACKUP).
+                                     append(BConstants::EXT_BACKUP_MANAGE);
+const string INDEX_FILE_RESTORE = string(BConstants::PATH_BUNDLE_BACKUP_HOME).
+                                      append(BConstants::SA_BUNDLE_BACKUP_RESTORE).
+                                      append(BConstants::EXT_BACKUP_MANAGE);
+const string INDEX_FILE_INCREMENTAL_BACKUP = string(BConstants::PATH_BUNDLE_BACKUP_HOME).
+                                                 append(BConstants::SA_BUNDLE_BACKUP_BACKUP);
+
+namespace {
+const int64_t DEFAULT_SLICE_SIZE = 100 * 1024 * 1024; // one tar slice is at most 100M
+const uint32_t MAX_FILE_COUNT = 6000;                 // one tar holds at most 6000 files
+} // namespace
+
+// Name of the side-car report file that accompanies a backup artifact.
+static string GetReportFileName(const string &fileName)
+{
+    string reportName = fileName + "." + string(BConstants::REPORT_FILE_EXT);
+    return reportName;
+}
+
+/**
+ * Compare the cloud report against the local storage report and classify
+ * every local file.
+ *
+ * @param cloudFd   fd of the report previously uploaded to the cloud
+ * @param storageFd fd of the freshly generated local report
+ * @return {allFiles, smallFiles, bigFiles, bigInfos}:
+ *         allFiles   — every local entry (with hash filled in);
+ *         smallFiles — new/changed entries below BIG_FILE_BOUNDARY (and dirs);
+ *         bigFiles   — stat() data of new/changed big files;
+ *         bigInfos   — report entries of those big files.
+ */
+static tuple<map<string, struct ReportFileInfo>, map<string, struct ReportFileInfo>,
+    map<string, struct stat>, map<string, struct ReportFileInfo>>
+    CompareFiles(const UniqueFd &cloudFd, const UniqueFd &storageFd)
+{
+    BReportEntity cloudRp(UniqueFd(cloudFd.Get()));
+    map<string, struct ReportFileInfo> cloudFiles = cloudRp.GetReportInfos();
+    BReportEntity storageRp(UniqueFd(storageFd.Get()));
+    map<string, struct ReportFileInfo> storageFiles = storageRp.GetReportInfos();
+    map<string, struct ReportFileInfo> allFiles = {};
+    map<string, struct ReportFileInfo> smallFiles = {};
+    map<string, struct stat> bigFiles = {};
+    map<string, struct ReportFileInfo> bigInfos = {};
+    for (auto &item : storageFiles) {
+        // compare entries one by one
+        string path = item.first;
+        if (item.second.isIncremental == true && item.second.isDir == true) {
+            smallFiles.try_emplace(path, item.second);
+        }
+        if (item.second.isIncremental == true && item.second.isDir == false) {
+            auto [res, fileHash] = BFileHash::HashWithSHA256(path);
+            if (fileHash.empty()) {
+                continue;
+            }
+            item.second.hash = fileHash;
+            item.second.isIncremental = true;
+        } else {
+            // FIX: condition was inverted — on a cloud miss the old code read
+            // cloudFiles[path] (default-inserting an empty record) and the
+            // hash always ended up "". Take the cloud hash only when present.
+            auto cloudIt = cloudFiles.find(path);
+            item.second.hash = (cloudIt != cloudFiles.end()) ? cloudIt->second.hash : "";
+        }
+        if (cloudFiles.find(path) == cloudFiles.end() ||
+            (item.second.isDir == false && item.second.isIncremental == true &&
+             cloudFiles.find(path)->second.hash != item.second.hash)) {
+            // absent from the cloud report, or the hash differs
+            struct stat sta = {};
+            if (stat(path.c_str(), &sta) == -1) {
+                continue;
+            }
+            if (sta.st_size < BConstants::BIG_FILE_BOUNDARY) {
+                item.second.size = sta.st_size;
+                smallFiles.try_emplace(path, item.second);
+            } else {
+                bigFiles.try_emplace(path, sta);
+                bigInfos.try_emplace(path, item.second);
+            }
+        }
+        allFiles.try_emplace(path, item.second);
+    }
+    HILOGI("compareFiles Find small files total: %{public}zu", smallFiles.size());
+    HILOGI("compareFiles Find big files total: %{public}zu", bigFiles.size());
+    return {allFiles, smallFiles, bigFiles, bigInfos};
+}
+
+// Serialize a report map to `filename` in the fixed two-header report format.
+static void WriteFile(const string &filename, const map<string, struct ReportFileInfo> &srcFiles)
+{
+    fstream f;
+    f.open(filename.data(), ios::out);
+    // the two fixed header lines come first
+    f << "version=1.0&attrNum=6" << endl;
+    f << "path;mode;dir;size;mtime;hash" << endl;
+    for (const auto &item : srcFiles) { // const ref: avoid copying every entry
+        const struct ReportFileInfo &info = item.second;
+        string str = item.first + ";" + info.mode + ";" + to_string(info.isDir) + ";" + to_string(info.size);
+        str += ";" + to_string(info.mtime) + ";" + info.hash;
+        f << str << endl;
+    }
+    f.close();
+    HILOGI("WriteFile path: %{public}s", filename.c_str());
+}
+
+/**
+ * Build the TarMap (hash-derived name -> {path, stat, isBeforeTar}) for the
+ * incremental big files.
+ */
+static TarMap GetIncrmentBigInfos(const map<string, struct stat> &files)
+{
+    auto getStringHash = [](const TarMap &m, const string &str) -> string {
+        ostringstream strHex;
+        strHex << hex;
+
+        hash<string> strHash;
+        size_t szHash = strHash(str);
+        strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash;
+        string name = strHex.str();
+        // on collision, re-hash with a counter suffix until the name is unique
+        for (int i = 0; m.find(name) != m.end(); ++i, strHex.str("")) {
+            szHash = strHash(str + to_string(i));
+            strHex << setfill('0') << setw(BConstants::BIG_FILE_NAME_SIZE) << szHash;
+            name = strHex.str();
+        }
+
+        return name;
+    };
+
+    TarMap bigFiles;
+    for (const auto &item : files) {
+        string md5Name = getStringHash(bigFiles, item.first);
+        if (!md5Name.empty()) {
+            bigFiles.emplace(md5Name, make_tuple(item.first, item.second, true));
+        }
+    }
+
+    return bigFiles;
+}
+
+/**
+ * Send one incremental tar slice and its report back to the service.
+ */
+static ErrCode IncrementalTarFileReady(const TarMap &bigFileInfo, const map<string, struct ReportFileInfo> &srcFiles,
+    sptr<IService> proxy)
+{
+    // FIX: guard the begin() dereference — Packet() may yield an empty map
+    if (bigFileInfo.empty()) {
+        HILOGE("IncrementalTarFileReady: no tar produced");
+        return ERR_OK;
+    }
+    string tarFile = bigFileInfo.begin()->first;
+    HILOGI("IncrementalTarFileReady: tar: %{public}s", tarFile.c_str());
+    string manageFile = GetReportFileName(tarFile);
+    // FIX: used to log tarFile under the manageFile label
+    HILOGI("IncrementalTarFileReady: manageFile: %{public}s", manageFile.c_str());
+    string file = string(INDEX_FILE_INCREMENTAL_BACKUP).append(manageFile);
+    WriteFile(file, srcFiles);
+
+    string tarName = string(INDEX_FILE_INCREMENTAL_BACKUP).append(tarFile);
+    ErrCode ret =
+        proxy->AppIncrementalFileReady(tarFile, UniqueFd(open(tarName.data(), O_RDONLY)),
+                                       UniqueFd(open(file.data(), O_RDONLY)));
+    if (SUCCEEDED(ret)) {
+        HILOGI("IncrementalTarFileReady: The application is packaged successfully");
+        // remove the transferred artifacts
+        RemoveFile(file);
+        RemoveFile(tarName);
+    } else {
+        HILOGE("IncrementalTarFileReady interface fails to be invoked: %{public}d", ret);
+    }
+    return ret;
+}
+
+/**
+ * Send the incremental big files and their per-file reports back to the
+ * service. Returns the last invocation result.
+ */
+static ErrCode IncrementalBigFileReady(const TarMap &pkgInfo, const map<string, struct ReportFileInfo> &bigInfos,
+    sptr<IService> proxy)
+{
+    ErrCode ret {ERR_OK};
+    for (auto &item : pkgInfo) {
+        if (item.first.empty()) {
+            continue;
+        }
+        auto [path, sta, isBeforeTar] = item.second;
+
+        UniqueFd fd(open(path.data(), O_RDONLY));
+        if (fd < 0) {
+            HILOGE("IncrementalBigFileReady open file failed, file name is %{public}s, err = %{public}d",
+                   path.c_str(), errno);
+            continue;
+        }
+
+        struct ReportFileInfo info = bigInfos.find(path)->second;
+        string file = GetReportFileName(string(INDEX_FILE_INCREMENTAL_BACKUP).append(item.first));
+        HILOGI("IncrementalBigFileReady write name is %{public}s", path.c_str());
+        HILOGI("IncrementalBigFileReady: file: %{public}s", file.c_str());
+        map<string, struct ReportFileInfo> bigInfo;
+        bigInfo.try_emplace(path, info);
+        WriteFile(file, bigInfo);
+
+        ret = proxy->AppIncrementalFileReady(item.first, std::move(fd), UniqueFd(open(file.data(), O_RDONLY)));
+        if (SUCCEEDED(ret)) {
+            HILOGI("IncrementalBigFileReady:The application is packaged successfully, package name is %{public}s",
+                   item.first.c_str());
+            RemoveFile(file);
+        } else {
+            HILOGE("IncrementalBigFileReady interface fails to be invoked: %{public}d", ret);
+        }
+    }
+    return ret;
+}
+
+// Entry point: compare the two manifests and schedule the async backup task.
+ErrCode BackupExtExtension::HandleIncrementalBackup(UniqueFd incrementalFd, UniqueFd manifestFd)
+{
+    string usrConfig = extension_->GetUsrConfig();
+    // NOTE(review): template argument reconstructed — verify against ext_extension.h
+    BJsonCachedEntity<BJsonEntityExtensionConfig> cachedEntity(usrConfig);
+    auto cache = cachedEntity.Structuralize();
+    if (!cache.GetAllowToBackupRestore()) {
+        HILOGE("Application does not allow backup or restore");
+        return BError(BError::Codes::EXT_FORBID_BACKUP_RESTORE, "Application does not allow backup or restore")
+            .GetCode();
+    }
+    auto [allFiles, smallFiles, bigFiles, bigInfos] = CompareFiles(move(manifestFd), move(incrementalFd));
+    AsyncTaskOnIncrementalBackup(allFiles, smallFiles, bigFiles, bigInfos);
+    return 0;
+}
+
+// Not supported yet: always returns invalid fds.
+tuple<UniqueFd, UniqueFd> BackupExtExtension::GetIncrementalBackupFileHandle()
+{
+    return {UniqueFd(-1), UniqueFd(-1)};
+}
+
+// Run DoIncrementalBackup on the thread pool and report completion via
+// AppIncrementalDone, mapping exceptions to error codes.
+void BackupExtExtension::AsyncTaskOnIncrementalBackup(const map<string, struct ReportFileInfo> &allFiles,
+    const map<string, struct ReportFileInfo> &smallFiles, const map<string, struct stat> &bigFiles,
+    const map<string, struct ReportFileInfo> &bigInfos)
+{
+    auto task = [obj {wptr<BackupExtExtension>(this)}, allFiles, smallFiles, bigFiles, bigInfos]() {
+        auto ptr = obj.promote();
+        try {
+            BExcepUltils::BAssert(ptr, BError::Codes::EXT_BROKEN_FRAMEWORK,
+                                  "Ext extension handle have been already released");
+            BExcepUltils::BAssert(ptr->extension_, BError::Codes::EXT_INVAL_ARG,
+                                  "extension handle have been already released");
+
+            auto ret = ptr->DoIncrementalBackup(allFiles, smallFiles, bigFiles, bigInfos);
+            ptr->AppIncrementalDone(ret);
+            HILOGE("Incremental backup app done %{public}d", ret);
+        } catch (const BError &e) {
+            ptr->AppIncrementalDone(e.GetCode());
+        } catch (const exception &e) {
+            HILOGE("Catched an unexpected low-level exception %{public}s", e.what());
+            ptr->AppIncrementalDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode());
+        } catch (...) {
+            HILOGE("Failed to restore the ext bundle");
+            ptr->AppIncrementalDone(BError(BError::Codes::EXT_INVAL_ARG).GetCode());
+        }
+    };
+
+    threadPool_.AddTask([task]() {
+        try {
+            task();
+        } catch (...) {
+            HILOGE("Failed to add task to thread pool");
+        }
+    });
+}
+
+// Millisecond-timestamp-based name shared by all slices of one packing run.
+static string GetIncrmentPartName()
+{
+    auto now = chrono::system_clock::now();
+    auto duration = now.time_since_epoch();
+    auto milliseconds = chrono::duration_cast<chrono::milliseconds>(duration);
+
+    return to_string(milliseconds.count()) + "_part";
+}
+
+// Pack the small files into tar slices (bounded by DEFAULT_SLICE_SIZE and
+// MAX_FILE_COUNT) and send each slice back as soon as it is full.
+static void IncrementalPacket(const map<string, struct ReportFileInfo> &infos, TarMap &tar, sptr<IService> proxy)
+{
+    HILOGI("IncrementalPacket begin");
+    string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_BACKUP);
+    int64_t totalSize = 0;
+    uint32_t fileCount = 0;
+    vector<string> packFiles;
+    map<string, struct ReportFileInfo> tarInfos;
+
+    string partName = GetIncrmentPartName();
+    for (const auto &small : infos) { // const ref: avoid copying every entry
+        totalSize += small.second.size;
+        fileCount += 1;
+        packFiles.emplace_back(small.first);
+        tarInfos.try_emplace(small.first, small.second);
+        if (totalSize >= DEFAULT_SLICE_SIZE || fileCount >= MAX_FILE_COUNT) {
+            TarMap tarMap {};
+            TarFile::GetInstance().Packet(packFiles, partName, path, tarMap);
+            tar.insert(tarMap.begin(), tarMap.end());
+            // hand the finished slice back to the service
+            IncrementalTarFileReady(tarMap, tarInfos, proxy);
+            totalSize = 0;
+            fileCount = 0;
+            packFiles.clear();
+            tarInfos.clear();
+        }
+    }
+    if (fileCount > 0) {
+        // pack and hand back the remainder
+        TarMap tarMap {};
+        TarFile::GetInstance().Packet(packFiles, partName, path, tarMap);
+        IncrementalTarFileReady(tarMap, tarInfos, proxy);
+        tar.insert(tarMap.begin(), tarMap.end());
+        packFiles.clear();
+        tarInfos.clear();
+    }
+}
+
+// Persist manage.json for pkgInfo, write the "all" report and send both back.
+static ErrCode IncrementalAllFileReady(const TarMap &pkgInfo, const map<string, struct ReportFileInfo> &srcFiles,
+    sptr<IService> proxy)
+{
+    // NOTE(review): template argument reconstructed — verify
+    BJsonCachedEntity<BJsonEntityExtManage> cachedEntity(
+        UniqueFd(open(INDEX_FILE_BACKUP.data(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)));
+    auto cache = cachedEntity.Structuralize();
+    cache.SetExtManage(pkgInfo);
+    cachedEntity.Persist();
+    close(cachedEntity.GetFd().Release());
+
+    string file = GetReportFileName(string(INDEX_FILE_INCREMENTAL_BACKUP).append("all"));
+    WriteFile(file, srcFiles);
+    UniqueFd fd(open(INDEX_FILE_BACKUP.data(), O_RDONLY));
+    UniqueFd manifestFd(open(file.data(), O_RDONLY));
+    ErrCode ret =
+        proxy->AppIncrementalFileReady(string(BConstants::EXT_BACKUP_MANAGE), std::move(fd), std::move(manifestFd));
+    if (SUCCEEDED(ret)) {
+        HILOGI("IncrementalAllFileReady successfully");
+        RemoveFile(file);
+    } else {
+        HILOGI(
+            "successfully but the IncrementalAllFileReady interface fails to be invoked: %{public}d", ret);
+    }
+    return ret;
+}
+
+// Drive one incremental backup: tar the small files, send the big files,
+// then send manage.json plus the full report.
+int BackupExtExtension::DoIncrementalBackup(const map<string, struct ReportFileInfo> &allFiles,
+    const map<string, struct ReportFileInfo> &smallFiles, const map<string, struct stat> &bigFiles,
+    const map<string, struct ReportFileInfo> &bigInfos)
+{
+    HILOGI("Do increment backup");
+    if (extension_->GetExtensionAction() != BConstants::ExtensionAction::BACKUP) {
+        return EPERM;
+    }
+
+    string path = string(BConstants::PATH_BUNDLE_BACKUP_HOME).append(BConstants::SA_BUNDLE_BACKUP_BACKUP);
+    if (mkdir(path.data(), S_IRWXU) && errno != EEXIST) {
+        throw BError(errno);
+    }
+
+    auto proxy = ServiceProxy::GetInstance();
+    if (proxy == nullptr) {
+        throw BError(BError::Codes::EXT_BROKEN_BACKUP_SA, std::generic_category().message(errno));
+    }
+    // gather the incremental and full data
+    if (smallFiles.size() == 0 && bigFiles.size() == 0) {
+        // nothing incremental: only report, no upload
+        TarMap tMap;
+        IncrementalAllFileReady(tMap, allFiles, proxy);
+        HILOGI("Do increment backup, IncrementalAllFileReady end, file empty");
+        return ERR_OK;
+    }
+
+    // tar slices for the small files
+    TarMap tarMap;
+    IncrementalPacket(smallFiles, tarMap, proxy);
+    HILOGI("Do increment backup, IncrementalPacket end");
+
+    // then hand back the big files
+    TarMap bigMap = GetIncrmentBigInfos(bigFiles);
+    IncrementalBigFileReady(bigMap, bigInfos, proxy);
+    HILOGI("Do increment backup, IncrementalBigFileReady end");
+    bigMap.insert(tarMap.begin(), tarMap.end());
+
+    // finally hand back manage.json and the full report
+    IncrementalAllFileReady(bigMap, allFiles, proxy);
+    HILOGI("Do increment backup, IncrementalAllFileReady end");
+    return ERR_OK;
+}
+
+// Notify the service that this app's incremental backup finished with errCode.
+void BackupExtExtension::AppIncrementalDone(ErrCode errCode)
+{
+    auto proxy = ServiceProxy::GetInstance();
+    BExcepUltils::BAssert(proxy, BError::Codes::EXT_BROKEN_IPC, "Failed to obtain the ServiceProxy handle");
+    auto ret = proxy->AppIncrementalDone(errCode);
+    if (ret != ERR_OK) {
+        HILOGE("Failed to notify the app done. err = %{public}d", ret);
+    }
+}
+} // namespace OHOS::FileManagement::Backup
diff --git a/services/backup_sa/include/module_ipc/svc_restore_deps_manager.h b/services/backup_sa/include/module_ipc/svc_restore_deps_manager.h
index d830cb2b6152558e181544e9b3adc3858efd8147..cb83798b9109353e6a7542023014105c1996166f 100644
--- a/services/backup_sa/include/module_ipc/svc_restore_deps_manager.h
+++ b/services/backup_sa/include/module_ipc/svc_restore_deps_manager.h
@@ -43,7 +43,7 @@ public:
     void AddRestoredBundles(const string &bundleName);
     vector<string> GetAllBundles() const;
     bool IsAllBundlesRestored() const;
-    void UpdateToRestoreBundleMap(const string &bundleName, const string &fileName);
+    bool UpdateToRestoreBundleMap(const string &bundleName, const string &fileName);
 
 private:
     SvcRestoreDepsManager() {}
diff --git a/services/backup_sa/src/module_ipc/service.cpp b/services/backup_sa/src/module_ipc/service.cpp
index 5151796a3cd51f60db5a0aada505bc2d255437f8..5242d8567e380a174b922255ad2470ef3fdcc6af 100644
--- a/services/backup_sa/src/module_ipc/service.cpp
+++ b/services/backup_sa/src/module_ipc/service.cpp
@@ -282,7 +282,7 @@ static vector<BJsonEntityCaps::BundleInfo> GetRestoreBundleNames(UniqueFd fd,
                                            .spaceOccupied = (*it).spaceOccupied,
                                            .allToBackup = (*it).allToBackup,
                                            .extensionName = restoreInfo.extensionName,
-                                           .restoreDeps = (*it).restoreDeps};
+                                           .restoreDeps = restoreInfo.restoreDeps};
         restoreBundleInfos.emplace_back(info);
     }
     return restoreBundleInfos;
@@ -540,6 +540,11 @@ ErrCode Service::GetFileHandle(const string &bundleName, const string &fileName)
     try {
         HILOGI("Begin");
         VerifyCaller(IServiceReverse::Scenario::RESTORE);
+
+        bool updateRes = SvcRestoreDepsManager::GetInstance().UpdateToRestoreBundleMap(bundleName, fileName);
+        if (updateRes) {
+            return BError(BError::Codes::OK);
+        }
         auto action = session_->GetServiceSchedAction(bundleName);
         if (action == BConstants::ServiceSchedAction::RUNNING) {
             auto backUpConnection = session_->GetExtConnection(bundleName);
@@ -553,7 +558,6 @@ ErrCode Service::GetFileHandle(const string &bundleName, const string &fileName)
             }
             session_->GetServiceReverseProxy()->RestoreOnFileReady(bundleName, fileName, move(fd));
         } else {
-            SvcRestoreDepsManager::GetInstance().UpdateToRestoreBundleMap(bundleName, fileName);
             session_->SetExtFileNameRequest(bundleName, fileName);
         }
         return BError(BError::Codes::OK);
diff --git a/services/backup_sa/src/module_ipc/svc_restore_deps_manager.cpp b/services/backup_sa/src/module_ipc/svc_restore_deps_manager.cpp
index 4f760ba8df4be6ef90faedf063b2f0999d4dce7c..115db71c495443b2d8149ecec32c30ac24481fba 100644
--- a/services/backup_sa/src/module_ipc/svc_restore_deps_manager.cpp
+++ b/services/backup_sa/src/module_ipc/svc_restore_deps_manager.cpp
@@ -151,13 +151,15 @@ bool SvcRestoreDepsManager::IsAllBundlesRestored() const
     return toRestoreBundleMap_.empty();
 }
 
-void SvcRestoreDepsManager::UpdateToRestoreBundleMap(const string &bundleName, const string &fileName)
+bool SvcRestoreDepsManager::UpdateToRestoreBundleMap(const string &bundleName, const string &fileName)
 {
     unique_lock<mutex> lock(lock_);
     auto it = toRestoreBundleMap_.find(bundleName);
     if (it != toRestoreBundleMap_.end()) {
         it->second.fileNames_.insert(fileName);
+        return true;
     }
+    return false;
 }
 } // namespace OHOS::FileManagement::Backup
\ No newline at end of file