diff --git a/tools/components_deps/components_deps_analyzer.py b/tools/components_deps/components_deps_analyzer.py index b74aa43bc16837749d18c886d8c9d30d67300d59..36d93b349da2eb986a85abb18a5cbc70119518e9 100644 --- a/tools/components_deps/components_deps_analyzer.py +++ b/tools/components_deps/components_deps_analyzer.py @@ -22,6 +22,42 @@ import re class Analyzer: + @classmethod + def analysis(cls, gn_path_list, new_line_nums, gn_name, config_path: str, open_components_path, + result_json_name: str): + if not os.path.exists(config_path): + print("error: {} is inaccessible or not found".format(config_path)) + return + if not os.path.exists(open_components_path): + print("error: {} is inaccessible or not found".format(open_components_path)) + return + if len(gn_path_list) != len(new_line_nums): + print("error: The new_line_nums and the gn_path are not in one-to-one correspondence.") + return + if len(gn_path_list) != len(gn_name): + print("error: The gn_path and gn_name are not in one-to-one correspondence.") + return + required_components = cls.__get_required_components(config_path) + open_components, gn_name_list, white_list = cls.__get_open_components(open_components_path) + gn_name2component = dict(zip(gn_name_list, open_components)) + optional_components = list() + for components in open_components: + if components not in required_components: + optional_components.append(components) + result = list() + for i, _ in enumerate(gn_path_list): + one_result = dict() + one_result["file_path"] = gn_path_list[i] + if gn_name[i] in gn_name_list and gn_name2component[gn_name[i]] in required_components: + one_result["error"] = cls.__judge_deps(gn_path_list[i], new_line_nums[i], open_components, + optional_components, white_list) + else: + one_result["error"] = [] + result.append(one_result) + with os.fdopen(os.open(result_json_name + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), "w", + encoding='utf-8') as fd: + json.dump(result, fd, indent=4, ensure_ascii=False) + @classmethod def __get_open_components(cls, xml_path): open_components = list() @@ -129,42 +165,6 @@ class Analyzer: line_num = [int(i) for i in line_num] return line_num - @classmethod - def analysis(cls, gn_path_list, new_line_nums, gn_name, config_path: str, open_components_path, - result_json_name: str): - if not os.path.exists(config_path): - print("error: {} is inaccessible or not found".format(config_path)) - return - if not os.path.exists(open_components_path): - print("error: {} is inaccessible or not found".format(open_components_path)) - return - if len(gn_path_list) != len(new_line_nums): - print("error: The new_line_nums and the gn_path are not in one-to-one correspondence.") - return - if len(gn_path_list) != len(gn_name): - print("error: The gn_path and gn_name are not in one-to-one correspondence.") - return - required_components = cls.__get_required_components(config_path) - open_components, gn_name_list, white_list = cls.__get_open_components(open_components_path) - gn_name2component = dict(zip(gn_name_list, open_components)) - optional_components = list() - for components in open_components: - if components not in required_components: - optional_components.append(components) - result = list() - for i, _ in enumerate(gn_path_list): - one_result = dict() - one_result["file_path"] = gn_path_list[i] - if gn_name[i] in gn_name_list and gn_name2component[gn_name[i]] in required_components: - one_result["error"] = cls.__judge_deps(gn_path_list[i], new_line_nums[i], open_components, - optional_components, white_list) - 
else: - one_result["error"] = [] - result.append(one_result) - with os.fdopen(os.open(result_json_name + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), "w", - encoding='utf-8') as fd: - json.dump(result, fd, indent=4, ensure_ascii=False) - def get_args(): parser = argparse.ArgumentParser( @@ -193,4 +193,4 @@ if __name__ == '__main__': open_components_xml_path = args.open_component_xml_path result_json = args.result_json_name Analyzer.analysis(gn_path_list_name, new_line_nums_list, gn_component_name, config_path, open_components_xml_path, - result_json) + result_json) \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/pkgs/basic_tool.py b/tools/rom_ram_analyzer/lite_small/pkgs/basic_tool.py index 421b3cf2ced03f3aced17f564170f168d809bde2..923e6fe072897fd38fb9992e044311ee45e35b6d 100644 --- a/tools/rom_ram_analyzer/lite_small/pkgs/basic_tool.py +++ b/tools/rom_ram_analyzer/lite_small/pkgs/basic_tool.py @@ -15,6 +15,8 @@ # +__all__ = ["translate_str_unit", "BasicTool", "do_nothing", "get_unit", "unit_adaptive"] + import itertools import os import re @@ -22,8 +24,6 @@ import glob from typing import * import unittest -__all__ = ["translate_str_unit", "BasicTool", "do_nothing", "get_unit", "unit_adaptive"] - def unit_adaptive(size: int) -> str: unit_list = ["Byte", "KB", "MB", "GB"] @@ -174,4 +174,4 @@ class BasicTool: """ output = os.popen(cmd).read() output = post_processor(output) - return output + return output \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/pkgs/gn_common_tool.py b/tools/rom_ram_analyzer/lite_small/pkgs/gn_common_tool.py index 61c74709789f2e1fdb8f7df030f2036ca1f3f9f4..07691b7aae6254c027b71c62846c2fc71ffacbb3 100644 --- a/tools/rom_ram_analyzer/lite_small/pkgs/gn_common_tool.py +++ b/tools/rom_ram_analyzer/lite_small/pkgs/gn_common_tool.py @@ -33,26 +33,6 @@ class GnCommonTool: 处理BUILD.gn文件的通用方法 """ - @classmethod - def _find_gn_variable_list(cls, content: str) -> List: - """ - 获取s中${xxx}或$xxx形式的gn变量 - :param content: 待查找的字符串 - :param sep: 分隔符,使用本分隔符将内容进行分隔然后逐一查找 - :return: 变量名及其符号,eg:${abc}、$abc - :FIXME 没有对a = 'a' b = a中的b这种形式进行处理 - """ - result = list() - splited = content.split(os.sep) - patern = re.compile(r"\${.*?}") - for item in splited: - m = re.findall(patern, item) - result.extend(m) - if len(m) == 0 and "$" in item: - item = item.strip('"') - result.append(item[item.index("$"):]) - return result - @classmethod def is_gn_variable(cls, target: str, quote_processed: bool = False): """ @@ -179,6 +159,26 @@ class GnCommonTool: break return result + @classmethod + def _find_gn_variable_list(cls, content: str) -> List: + """ + 获取s中${xxx}或$xxx形式的gn变量 + :param content: 待查找的字符串 + :param sep: 分隔符,使用本分隔符将内容进行分隔然后逐一查找 + :return: 变量名及其符号,eg:${abc}、$abc + :FIXME 没有对a = 'a' b = a中的b这种形式进行处理 + """ + result = list() + splited = content.split(os.sep) + patern = re.compile(r"\${.*?}") + for item in splited: + m = re.findall(patern, item) + result.extend(m) + if len(m) == 0 and "$" in item: + item = item.strip('"') + result.append(item[item.index("$"):]) + return result + class GnVariableParser: @classmethod diff --git a/tools/rom_ram_analyzer/lite_small/pkgs/rom_ram_baseline_collector.py b/tools/rom_ram_analyzer/lite_small/pkgs/rom_ram_baseline_collector.py index 66b22d260547966efdcd4035fd0d576566b09855..dbf627323aaf6a91aa01d2e4eed6d4b2398db105 100644 --- a/tools/rom_ram_analyzer/lite_small/pkgs/rom_ram_baseline_collector.py +++ b/tools/rom_ram_analyzer/lite_small/pkgs/rom_ram_baseline_collector.py @@ -15,26 +15,17 @@ # This file is 
to collect baseline information (according to bundle.json) +from typing import Dict, Any +import json +import logging if __name__ == '__main__': from basic_tool import BasicTool else: from pkgs.basic_tool import BasicTool -from typing import Dict, Any -import json -import logging class RomRamBaselineCollector: """collect baseline of rom and ram from bundle.json """ - @classmethod - def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str, bundle_path:str) -> None: - if not result_dict.get(subsystem_name): - result_dict[subsystem_name] = dict() - result_dict[subsystem_name][component_name] = dict() - result_dict[subsystem_name][component_name]["rom"] = rom_size - result_dict[subsystem_name][component_name]["ram"] = ram_size - result_dict[subsystem_name][component_name]["bundle.json"] = bundle_path - @classmethod def collect(cls, oh_path: str) -> Dict[str, Dict]: """ @@ -67,4 +58,13 @@ class RomRamBaselineCollector: if not (subsystem_name or rom_baseline or ram_baseline): logging.warning(f"subsystem=\"{subsystem_name}\", rom=\"{rom_baseline}\", ram=\"{ram_baseline}\" in {bundle}") cls._put(rom_ram_baseline_dict, subsystem_name, component_name, rom_baseline, ram_baseline, bundle) - return rom_ram_baseline_dict \ No newline at end of file + return rom_ram_baseline_dict + + @classmethod + def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str, bundle_path:str) -> None: + if not result_dict.get(subsystem_name): + result_dict[subsystem_name] = dict() + result_dict[subsystem_name][component_name] = dict() + result_dict[subsystem_name][component_name]["rom"] = rom_size + result_dict[subsystem_name][component_name]["ram"] = ram_size + result_dict[subsystem_name][component_name]["bundle.json"] = bundle_path \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/pkgs/simple_excel_writer.py b/tools/rom_ram_analyzer/lite_small/pkgs/simple_excel_writer.py index 81f44316f0b59108b1198565f3af63229145a8ee..6c06310129d9d47b30b74dd9efe2ffd32d4e60a4 100644 --- a/tools/rom_ram_analyzer/lite_small/pkgs/simple_excel_writer.py +++ b/tools/rom_ram_analyzer/lite_small/pkgs/simple_excel_writer.py @@ -57,20 +57,6 @@ class SimpleExcelWriter: self.__head_style.pattern = ptrn self.__content_style.alignment = algmt - def __increment_y(self, sheet_name: str, value: int = 1) -> int: - if sheet_name in self.__sheet_pos.keys(): - x, y = self.__sheet_pos.get(sheet_name) - y = y + value - self.__sheet_pos[sheet_name] = (x, y) - return y - - def __increment_x(self, sheet_name: str, value: int = 1) -> int: - if sheet_name in self.__sheet_pos.keys(): - x, y = self.__sheet_pos.get(sheet_name) - x = x + value - self.__sheet_pos[sheet_name] = (x, 0) - return x - def append_line(self, content: list, sheet_name: str = None): sheet_name = self.__default_sheet_name if sheet_name is None else sheet_name if sheet_name not in self.__sheet_dict.keys(): @@ -122,3 +108,17 @@ class SimpleExcelWriter: def save(self, file_name: str): self.__book.save(file_name) + + def __increment_y(self, sheet_name: str, value: int = 1) -> int: + if sheet_name in self.__sheet_pos.keys(): + x, y = self.__sheet_pos.get(sheet_name) + y = y + value + self.__sheet_pos[sheet_name] = (x, y) + return y + + def __increment_x(self, sheet_name: str, value: int = 1) -> int: + if sheet_name in self.__sheet_pos.keys(): + x, y = self.__sheet_pos.get(sheet_name) + x = x + value + self.__sheet_pos[sheet_name] = (x, 0) + return x \ No newline at end of file diff 
--git a/tools/rom_ram_analyzer/lite_small/src/config.py b/tools/rom_ram_analyzer/lite_small/src/config.py index be7819575f9b7cba03bf150721f05b67cd48f29b..f506c4ebe97abe104d740285d449d27f7a5696fb 100644 --- a/tools/rom_ram_analyzer/lite_small/src/config.py +++ b/tools/rom_ram_analyzer/lite_small/src/config.py @@ -17,6 +17,8 @@ # products. +__all__ = ["configs", "result_dict", "collector_config", "sub_com_dict"] + import os import sys import argparse @@ -30,9 +32,6 @@ from pkgs.basic_tool import do_nothing, BasicTool from get_subsystem_component import SC from misc import * from template_processor import * -""" -只给rom_analysis.py使用 -""" def parse_args(): @@ -283,6 +282,4 @@ collector_config: Tuple[BaseProcessor] = ( unit_post_handler=DefaultPostHandler(), resource_field="sources" ), -) - -__all__ = ["configs", "result_dict", "collector_config", "sub_com_dict"] +) \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/src/get_subsystem_component.py b/tools/rom_ram_analyzer/lite_small/src/get_subsystem_component.py index 2620c7fbe050edb0b85fc5e982a239b32aeaff4b..8646209258ac66dcc50ff0a8d6dc32d627e3fb8a 100644 --- a/tools/rom_ram_analyzer/lite_small/src/get_subsystem_component.py +++ b/tools/rom_ram_analyzer/lite_small/src/get_subsystem_component.py @@ -16,6 +16,8 @@ # This file is for get the mapping relationship of subsystem_name/component_name # and their directory. The code is from Yude Chen. +__all__ = ["SC"] + import argparse import os import json @@ -79,7 +81,7 @@ def get_subsystem_components_modified(ohos_root) -> dict: def export_to_json(subsystem_item: dict, output_filename: str): subsystem_item_json = json.dumps( subsystem_item, indent=4, separators=(', ', ': ')) - with open(output_filename, 'w') as f: + with os.fdopen(os.open(output_filename, os.O_WRONLY | os.O_CREAT, mode=0o640), 'w') as f: f.write(subsystem_item_json) logging.info("output path: {}".format(output_filename)) @@ -135,7 +137,4 @@ class SC: if save_result and output_path: export_to_json(info, output_path) print_warning_info() - return info - - -__all__ = ["SC"] + return info \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/src/misc.py b/tools/rom_ram_analyzer/lite_small/src/misc.py index c7a7a6f99848cf53550c01e332a11dfd635b8c96..68b0ec727da325faa84dccd9e0f5b21f3a028933 100644 --- a/tools/rom_ram_analyzer/lite_small/src/misc.py +++ b/tools/rom_ram_analyzer/lite_small/src/misc.py @@ -31,10 +31,6 @@ from pkgs.basic_tool import BasicTool _config = SimpleYamlTool.read_yaml("config.yaml") -""" -===============info handlers=============== -""" - def extension_handler(paragraph: Text): return GnVariableParser.string_parser("output_extension", paragraph).strip('"') @@ -53,11 +49,6 @@ def mod_handler(paragraph: Text): return GnVariableParser.string_parser("mode", paragraph).strip('"') -""" -===============gn lineno collector=============== -""" - - def gn_lineno_collect(match_pattern: str, project_path: str) -> DefaultDict[str, List[int]]: """ 在整个项目路径下搜索有特定target类型的BUILD.gn @@ -85,11 +76,6 @@ def gn_lineno_collect(match_pattern: str, project_path: str) -> DefaultDict[str, return gn_line_dict -""" -===============target name parser=============== -""" - - class TargetNameParser: @classmethod def single_parser(cls, paragraph: Text) -> str: @@ -110,19 +96,15 @@ class TargetNameParser: return BasicTool.re_group_1(paragraph, r"\w+\(.*?, *(.*?)\)") -""" -===============post handlers=============== -""" - class BasePostHandler(ABC): + def __call__(self, unit: Dict[str, AnyStr]) -> str: + 
return self.run(unit) + @abstractmethod def run(self, unit: Dict[str, AnyStr]) -> str: ... - def __call__(self, unit: Dict[str, AnyStr]) -> str: - return self.run(unit) - def add_prefix(content: str, prefix: str) -> str: if content and (not content.startswith(prefix)): @@ -292,4 +274,4 @@ def TargetS2MPostHandler(unit: Dict, result_dict: Dict) -> None: tmp_s = copy.deepcopy(unit) tmp_s["real_target_type"] = "shared_library" k = LiteLibPostHandler()(tmp_s) - result_dict["target"][k] = tmp_s + result_dict["target"][k] = tmp_s \ No newline at end of file diff --git a/tools/rom_ram_analyzer/lite_small/src/rom_analysis.py b/tools/rom_ram_analyzer/lite_small/src/rom_analysis.py index bd2e91f9e1f5d7e223b031fdf830b11fcf57a7e9..9cc08cccca5c34f94788efd16aeed4acce59bb7d 100644 --- a/tools/rom_ram_analyzer/lite_small/src/rom_analysis.py +++ b/tools/rom_ram_analyzer/lite_small/src/rom_analysis.py @@ -15,6 +15,16 @@ # This file is for rom analyzation of lite/small devices. +""" +1. 先收集BUILD.gn中的target信息 +2. 然后根据编译产物到1中进行搜索,匹配其所属的部件 + +对于ohos开头的template,主要根据其component字段和subsystem_name字段来归数其部件;同时也要考虑install_dir字段 +对于gn原生的template,主要根据bundle.json中的字段来归属其部件 + +对于找不到的,可以模糊匹配,如,有产物libxxx,则可以在所有的BUILD.gn中搜索xxx,并设置一个阀值予以过滤 +""" + import sys import argparse import json @@ -37,18 +47,43 @@ from pkgs.rom_ram_baseline_collector import RomRamBaselineCollector from misc import gn_lineno_collect -""" -1. 先收集BUILD.gn中的target信息 -2. 然后根据编译产物到1中进行搜索,匹配其所属的部件 - -对于ohos开头的template,主要根据其component字段和subsystem_name字段来归数其部件;同时也要考虑install_dir字段 -对于gn原生的template,主要根据bundle.json中的字段来归属其部件 - -对于找不到的,可以模糊匹配,如,有产物libxxx,则可以在所有的BUILD.gn中搜索xxx,并设置一个阀值予以过滤 -""" - - class RomAnalysisTool: + @classmethod + def analysis(cls, product_name: str, product_dict: Dict[str, List[str]], output_file_name: str): + """analysis the rom of lite/small product + + Args: + product_name (str): product name configured in the yaml + product_dict (Dict[str, List[str]]): result dict of compiled product file + format: + "bin":[...], + "so":[...] + ... 
+ """ + logging.info("start analyzing...") + rom_ram_baseline: Dict[str, Dict] = RomRamBaselineCollector.collect( + project_path) + with os.fdopen(os.open("rom_ram_baseline.json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + json.dump(rom_ram_baseline, f, indent=4) + gn_info_file = configs["gn_info_file"] # filename to save gn_info + with open(gn_info_file, 'r', encoding='utf-8') as f: + gn_info = json.load(f) + query_order: Dict[str, List[str] + ] = configs[product_name]["query_order"] # query order of the gn template to be matched + query_order["etc"] = configs["target_type"] # etc会查找所有的template + rom_size_dict: Dict = dict() + if "manual_config" in configs[product_name].keys(): + cls._match_manual_configured( + configs[product_name]["manual_config"], product_dict, configs[product_name]["product_dir"]["root"], rom_size_dict) + cls._subsystem_component_for_all_product_file( + product_dict, query_order, gn_info, gn_info_file, rom_ram_baseline, rom_size_dict) + if unit_adapt: + cls._result_unit_adaptive(rom_size_dict) + with os.fdopen(os.open(output_file_name + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + json.dump(rom_size_dict, f, indent=4) + cls._save_as_xls(rom_size_dict, product_name, baseline) + logging.info("success") + @classmethod def collect_gn_info(cls): logging.info("start scanning BUILD.gn") @@ -59,7 +94,7 @@ class RomAnalysisTool: for f in future_list: f.result() gn_info_file = configs["gn_info_file"] - with open(gn_info_file, 'w', encoding='utf-8') as f: + with os.fdopen(os.open(gn_info_file, os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: json.dump(result_dict, f, indent=4) @classmethod @@ -145,7 +180,7 @@ class RomAnalysisTool: def collect_product_info(cls, product_name: str): logging.info("start scanning compile products") product_dict: Dict[str, List[str]] = cls._find_files(product_name) - with open(configs[product_name]["product_infofile"], 'w', encoding='utf-8') as f: + with os.fdopen(os.open(configs[product_name]["product_infofile"], os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: json.dump(product_dict, f, indent=4) return product_dict @@ -393,42 +428,6 @@ class RomAnalysisTool: cls._iterate_all_template_type( type_list, gn_info, gn_info_file, base_name, rom_ram_baseline, rom_size_dict, f, size) - @classmethod - def analysis(cls, product_name: str, product_dict: Dict[str, List[str]], output_file_name: str): - """analysis the rom of lite/small product - - Args: - product_name (str): product name configured in the yaml - product_dict (Dict[str, List[str]]): result dict of compiled product file - format: - "bin":[...], - "so":[...] - ... 
- """ - logging.info("start analyzing...") - rom_ram_baseline: Dict[str, Dict] = RomRamBaselineCollector.collect( - project_path) - with open("rom_ram_baseline.json", 'w', encoding='utf-8') as f: - json.dump(rom_ram_baseline, f, indent=4) - gn_info_file = configs["gn_info_file"] # filename to save gn_info - with open(gn_info_file, 'r', encoding='utf-8') as f: - gn_info = json.load(f) - query_order: Dict[str, List[str] - ] = configs[product_name]["query_order"] # query order of the gn template to be matched - query_order["etc"] = configs["target_type"] # etc会查找所有的template - rom_size_dict: Dict = dict() - if "manual_config" in configs[product_name].keys(): - cls._match_manual_configured( - configs[product_name]["manual_config"], product_dict, configs[product_name]["product_dir"]["root"], rom_size_dict) - cls._subsystem_component_for_all_product_file( - product_dict, query_order, gn_info, gn_info_file, rom_ram_baseline, rom_size_dict) - if unit_adapt: - cls._result_unit_adaptive(rom_size_dict) - with open(output_file_name + ".json", 'w', encoding='utf-8') as f: - json.dump(rom_size_dict, f, indent=4) - cls._save_as_xls(rom_size_dict, product_name, baseline) - logging.info("success") - def main(): if recollect_gn: diff --git a/tools/rom_ram_analyzer/lite_small/src/template_processor.py b/tools/rom_ram_analyzer/lite_small/src/template_processor.py index b5378b55b22616a3f5548f5869245fa444e49b1a..0ddc50c1fd732dbee2825e16854aa01abc78e211 100644 --- a/tools/rom_ram_analyzer/lite_small/src/template_processor.py +++ b/tools/rom_ram_analyzer/lite_small/src/template_processor.py @@ -78,6 +78,10 @@ class BaseProcessor(ABC): self.unit_post_handler = unit_post_handler self.resource_field = resource_field self.ud_post_handler = ud_post_handler + + + def __call__(self, *args, **kwargs): + self.run() def _append(self, key: str, unit: Dict) -> None: """ @@ -108,9 +112,6 @@ class BaseProcessor(ABC): def run(self): ... 
- def __call__(self, *args, **kwargs): - self.run() - def _gn_var_process(project_path: str, gn_v: str, alt_v: str, gn_path: str, ifrom: str, efrom: str, strip_quote: bool = False) -> Tuple[str, str]: """ diff --git a/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py b/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py index cae62a363e2775103af0bc3b5aa80a84b6c93869..1eaed52233b286f28b9340ef483e9465c0b7cff5 100644 --- a/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py +++ b/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py @@ -85,6 +85,34 @@ class GnCommonTool: GnCommonTool.__var_val_mem_dict[v] = output path = os.path.split(path)[0] return tuple(var_val_dict.values()) + + @classmethod + def find_part_subsystem(cls, gn_file: str, project_path: str) -> tuple: + """ + 查找gn_file对应的part_name和subsystem + 如果在gn中找不到,就到bundle.json中去找 + """ + part_var_flag = False # 标识这个变量从gn中取出的原始值是不是变量 + subsystem_var_flag = False + var_list = list() + part_name_pattern = r"part_name *=\s*\S*" + subsystem_pattern = r"subsystem_name *=\s*\S*" + meta_grep_pattern = "grep -E '{}' {} | head -n 1" + part_cmd = meta_grep_pattern.format(part_name_pattern, gn_file) + subsystem_cmd = meta_grep_pattern.format(subsystem_pattern, gn_file) + + part_name, subsystem_name = cls._parse_part_subsystem(part_var_flag, subsystem_var_flag, + var_list, part_cmd, subsystem_cmd, gn_file, project_path) + if part_name and subsystem_name: + return part_name, subsystem_name + # 如果有一个没有找到,就要一层层去找bundle.json文件 + t_part_name, t_subsystem_name = cls.__find_part_subsystem_from_bundle( + gn_file, stop_tail=project_path) + if t_part_name: + part_name = t_part_name + if t_subsystem_name: + subsystem_name = t_subsystem_name + return part_name, subsystem_name @classmethod def __find_part_subsystem_from_bundle(cls, gnpath: str, stop_tail: str = "home") -> tuple: @@ -157,34 +185,6 @@ class GnCommonTool: t) != 0 else subsystem_name return part_name, subsystem_name - @classmethod - def find_part_subsystem(cls, gn_file: str, project_path: str) -> tuple: - """ - 查找gn_file对应的part_name和subsystem - 如果在gn中找不到,就到bundle.json中去找 - """ - part_var_flag = False # 标识这个变量从gn中取出的原始值是不是变量 - subsystem_var_flag = False - var_list = list() - part_name_pattern = r"part_name *=\s*\S*" - subsystem_pattern = r"subsystem_name *=\s*\S*" - meta_grep_pattern = "grep -E '{}' {} | head -n 1" - part_cmd = meta_grep_pattern.format(part_name_pattern, gn_file) - subsystem_cmd = meta_grep_pattern.format(subsystem_pattern, gn_file) - - part_name, subsystem_name = cls._parse_part_subsystem(part_var_flag, subsystem_var_flag, - var_list, part_cmd, subsystem_cmd, gn_file, project_path) - if part_name and subsystem_name: - return part_name, subsystem_name - # 如果有一个没有找到,就要一层层去找bundle.json文件 - t_part_name, t_subsystem_name = cls.__find_part_subsystem_from_bundle( - gn_file, stop_tail=project_path) - if t_part_name: - part_name = t_part_name - if t_subsystem_name: - subsystem_name = t_subsystem_name - return part_name, subsystem_name - class GnVariableParser: @classmethod diff --git a/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py b/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py index fc6d2f7dc11688c54b87b4035d6d37eb4f6f828f..8a181d0e445880c326a0f3b3239f29ef1d99aa5b 100644 --- a/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py +++ b/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py @@ -15,30 +15,20 @@ # This file is to collect baseline information (according to bundle.json) +from typing import Dict, Any 
+import json +import logging if __name__ == '__main__': from basic_tool import BasicTool else: from pkgs.basic_tool import BasicTool -from typing import Dict, Any -import json -import logging class RomRamBaselineCollector: """collect baseline of rom and ram from bundle.json """ - @classmethod - def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str, - bundle_path: str) -> None: - if not result_dict.get(subsystem_name): - result_dict[subsystem_name] = dict() - result_dict[subsystem_name][component_name] = dict() - result_dict[subsystem_name][component_name]["rom"] = rom_size - result_dict[subsystem_name][component_name]["ram"] = ram_size - result_dict[subsystem_name][component_name]["bundle.json"] = bundle_path - @classmethod def collect(cls, oh_path: str) -> Dict[str, Dict]: def post_handler(x: str) -> list: @@ -66,3 +56,13 @@ class RomRamBaselineCollector: cls._put(rom_ram_baseline_dict, subsystem_name, component_name, rom_baseline, ram_baseline, bundle) return rom_ram_baseline_dict + + @classmethod + def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str, + bundle_path: str) -> None: + if not result_dict.get(subsystem_name): + result_dict[subsystem_name] = dict() + result_dict[subsystem_name][component_name] = dict() + result_dict[subsystem_name][component_name]["rom"] = rom_size + result_dict[subsystem_name][component_name]["ram"] = ram_size + result_dict[subsystem_name][component_name]["bundle.json"] = bundle_path \ No newline at end of file diff --git a/tools/rom_ram_analyzer/standard/pkgs/simple_excel_writer.py b/tools/rom_ram_analyzer/standard/pkgs/simple_excel_writer.py index 60feeb0b26918bc3c6b46a6185ddb57444349266..6e5f0846fdf33ebb4049def47861ccb382cfec09 100644 --- a/tools/rom_ram_analyzer/standard/pkgs/simple_excel_writer.py +++ b/tools/rom_ram_analyzer/standard/pkgs/simple_excel_writer.py @@ -55,20 +55,6 @@ class SimpleExcelWriter: self.__head_style.pattern = ptrn self.__content_style.alignment = algmt - def __increment_y(self, sheet_name: str, value: int = 1) -> int: - if sheet_name in self.__sheet_pos.keys(): - x, y = self.__sheet_pos.get(sheet_name) - y = y + value - self.__sheet_pos[sheet_name] = (x, y) - return y - - def __increment_x(self, sheet_name: str, value: int = 1) -> int: - if sheet_name in self.__sheet_pos.keys(): - x, y = self.__sheet_pos.get(sheet_name) - x = x + value - self.__sheet_pos[sheet_name] = (x, 0) - return x - def append_line(self, content: list, sheet_name: str = None): sheet_name = self.__default_sheet_name if sheet_name is None else sheet_name if sheet_name not in self.__sheet_dict.keys(): @@ -121,6 +107,20 @@ class SimpleExcelWriter: def save(self, file_name: str): self.__book.save(file_name) + def __increment_y(self, sheet_name: str, value: int = 1) -> int: + if sheet_name in self.__sheet_pos.keys(): + x, y = self.__sheet_pos.get(sheet_name) + y = y + value + self.__sheet_pos[sheet_name] = (x, y) + return y + + def __increment_x(self, sheet_name: str, value: int = 1) -> int: + if sheet_name in self.__sheet_pos.keys(): + x, y = self.__sheet_pos.get(sheet_name) + x = x + value + self.__sheet_pos[sheet_name] = (x, 0) + return x + if __name__ == '__main__': writer = SimpleExcelWriter(default_sheet_name="first") diff --git a/tools/rom_ram_analyzer/standard/ram_analyzer.py b/tools/rom_ram_analyzer/standard/ram_analyzer.py index cc354fb017f911ed39d469d64ff0e82a6d97b6fb..02482c30b24ac302a1e3f21153d21f4858c627ce 100644 --- 
a/tools/rom_ram_analyzer/standard/ram_analyzer.py +++ b/tools/rom_ram_analyzer/standard/ram_analyzer.py @@ -79,6 +79,47 @@ def delete_values_from_dict(target_dict: typing.Dict, key_list: typing.Iterable) class RamAnalyzer: + @classmethod + def analysis(cls, cfg_path: str, json_path: str, rom_result_json: str, device_num: str, + output_file: str, ss: str, output_excel: bool, baseline_file: str, unit_adapt: bool): + """ + process size subsystem/component so so_size + """ + if not HDCTool.verify_hdc(): + print("error: Command 'hdc' not found") + return + if not HDCTool.verify_device(device_num): + print("error: {} is inaccessible or not found".format(device_num)) + return + with open(rom_result_json, 'r', encoding='utf-8') as f: + rom_result_dict: typing.Dict = json.loads(f.read()) + # 从rom的分析结果中将需要的elf信息重组 + so_info_dict: typing.Dict[ + str, typing.Dict[str["component_name|subsystem_name|size"], str]] = cls.get_elf_info_from_rom_result( + rom_result_json) + process_elf_dict: typing.Dict[str, typing.List[str]] = cls.get_process_so_relationship(cfg_path, + json_path) + process_size_dict: typing.Dict[str, int] = cls.process_hidumper_info( + device_num, ss) + result_dict: typing.Dict[str, typing.Dict[str, typing.Any]] = dict() + result_dict = cls.result_process4(result_dict, process_size_dict, rom_result_dict, process_elf_dict, + so_info_dict) + base_dir, _ = os.path.split(output_file) + if len(base_dir) != 0 and not os.path.isdir(base_dir): + os.makedirs(base_dir, exist_ok=True) + with os.fdopen(os.open(output_file + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + json.dump(result_dict, f, indent=4) + refactored_result: Dict[str, Dict] = refacotr_result(result_dict) + if unit_adapt: + cls.refactored_result_unit_adaptive(refactored_result) + if baseline_file: + cls.add_baseline(refactored_result, baseline_file) + with os.fdopen(os.open(f"refactored_{output_file}.json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + json.dump(refactored_result, f, indent=4) + if output_excel: + cls.__save_result_as_excel( + refactored_result, output_file + ".xls", ss, baseline_file, unit_adapt) + @classmethod def __hidumper_mem_line_process(cls, content: typing.Text) -> typing.List[typing.Text]: """ @@ -527,47 +568,6 @@ class RamAnalyzer: result_dict[process_name][subsystem_name][component_name][so] = so_size return result_dict - @classmethod - def analysis(cls, cfg_path: str, json_path: str, rom_result_json: str, device_num: str, - output_file: str, ss: str, output_excel: bool, baseline_file: str, unit_adapt: bool): - """ - process size subsystem/component so so_size - """ - if not HDCTool.verify_hdc(): - print("error: Command 'hdc' not found") - return - if not HDCTool.verify_device(device_num): - print("error: {} is inaccessible or not found".format(device_num)) - return - with open(rom_result_json, 'r', encoding='utf-8') as f: - rom_result_dict: typing.Dict = json.loads(f.read()) - # 从rom的分析结果中将需要的elf信息重组 - so_info_dict: typing.Dict[ - str, typing.Dict[str["component_name|subsystem_name|size"], str]] = cls.get_elf_info_from_rom_result( - rom_result_json) - process_elf_dict: typing.Dict[str, typing.List[str]] = cls.get_process_so_relationship(cfg_path, - json_path) - process_size_dict: typing.Dict[str, int] = cls.process_hidumper_info( - device_num, ss) - result_dict: typing.Dict[str, typing.Dict[str, typing.Any]] = dict() - result_dict = cls.result_process4(result_dict, process_size_dict, rom_result_dict, process_elf_dict, - so_info_dict) - base_dir, _ = 
os.path.split(output_file) - if len(base_dir) != 0 and not os.path.isdir(base_dir): - os.makedirs(base_dir, exist_ok=True) - with open(output_file + ".json", 'w', encoding='utf-8') as f: - json.dump(result_dict, f, indent=4) - refactored_result: Dict[str, Dict] = refacotr_result(result_dict) - if unit_adapt: - cls.refactored_result_unit_adaptive(refactored_result) - if baseline_file: - cls.add_baseline(refactored_result, baseline_file) - with open(f"refactored_{output_file}.json", 'w', encoding='utf-8') as f: - json.dump(refactored_result, f, indent=4) - if output_excel: - cls.__save_result_as_excel( - refactored_result, output_file + ".xls", ss, baseline_file, unit_adapt) - def inside_refacotr_result(component_info, refactored_ram_dict, subsystem_name, component_name, process_name, process_size): @@ -646,4 +646,4 @@ if __name__ == '__main__': unit_adaptiv = args.unit_adaptive RamAnalyzer.analysis(cfg_path_name, profile_path_name, rom_result, device_num=device, output_file=output_filename, ss="Pss", output_excel=output_excel_path, - baseline_file=baseline, unit_adapt=unit_adaptiv) + baseline_file=baseline, unit_adapt=unit_adaptiv) \ No newline at end of file diff --git a/tools/rom_ram_analyzer/standard/rom_analyzer.py b/tools/rom_ram_analyzer/standard/rom_analyzer.py index 08058a61422f87c36da1d174f4e7a54becf12c12..f625cfa6c98655f5ba703d37889026579bb4a7e2 100644 --- a/tools/rom_ram_analyzer/standard/rom_analyzer.py +++ b/tools/rom_ram_analyzer/standard/rom_analyzer.py @@ -43,6 +43,16 @@ class PreCollector: self.info_dict: Dict[str, Any] = dict() self.project_path = BasicTool.get_abs_path(project_path) self.result_dict = dict() + + def collect_sa_profile(self): + grep_kw = r"ohos_sa_profile" + grep_cmd = f"grep -rn '{grep_kw}' --include=BUILD.gn {self.project_path}" + content = BasicTool.execute( + grep_cmd, post_processor=lambda x: x.split('\n')) + for item in content: + if not item: + continue + self._process_single_sa(item, start_pattern=grep_kw) def _process_single_sa(self, item: str, start_pattern: str): gn, _, _ = item.split(':') @@ -64,18 +74,58 @@ class PreCollector: "gn_path": gn } - def collect_sa_profile(self): - grep_kw = r"ohos_sa_profile" - grep_cmd = f"grep -rn '{grep_kw}' --include=BUILD.gn {self.project_path}" - content = BasicTool.execute( - grep_cmd, post_processor=lambda x: x.split('\n')) - for item in content: - if not item: - continue - self._process_single_sa(item, start_pattern=grep_kw) - class RomAnalyzer: + @classmethod + def analysis(cls, system_module_info_json: Text, product_dirs: List[str], + project_path: Text, product_name: Text, output_file: Text, output_execel: bool, add_baseline: bool, + unit_adapt: bool): + """ + system_module_info_json: json文件 + product_dirs:要处理的产物的路径列表如["vendor", "system/"] + project_path: 项目根路径 + product_name: eg,rk3568 + output_file: basename of output file + """ + project_path = BasicTool.get_abs_path(project_path) + rom_baseline_dict: Dict[str, Any] = RomRamBaselineCollector.collect( + project_path) + with os.fdopen(os.open("rom_ram_baseline.json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + json.dump(rom_baseline_dict, f, indent=4) + phone_dir = os.path.join( + project_path, "out", product_name, "packages", "phone") + product_dirs = [os.path.join(phone_dir, d) for d in product_dirs] + pre_collector = PreCollector(project_path) + pre_collector.collect_sa_profile() + extra_product_info_dict: Dict[str, Dict] = pre_collector.result_dict + product_info_dict = cls.__collect_product_info( + 
system_module_info_json, project_path, + extra_info=extra_product_info_dict) # collect product info from json file + result_dict: Dict[Text:Dict] = dict() + for d in product_dirs: + file_list: List[Text] = BasicTool.find_all_files(d) + for f in file_list: + size = os.path.getsize(f) + relative_filepath = f.replace(phone_dir, "").lstrip(os.sep) + unit: Dict[Text, Any] = product_info_dict.get( + relative_filepath) + if not unit: + bf = f.split('/')[-1] + unit: Dict[Text, Any] = product_info_dict.get(bf) + if not unit: + unit = dict() + unit["size"] = size + unit["relative_filepath"] = relative_filepath + cls.__put(unit, result_dict, rom_baseline_dict, add_baseline) + output_dir, _ = os.path.split(output_file) + if len(output_dir) != 0: + os.makedirs(output_dir, exist_ok=True) + if unit_adapt: + cls.result_unit_adaptive(result_dict) + with os.fdopen(os.open(output_file + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', encoding='utf-8') as f: + f.write(json.dumps(result_dict, indent=4)) + if output_execel: + cls.__save_result_as_excel(result_dict, output_file, add_baseline) @classmethod def __collect_product_info(cls, system_module_info_json: Text, @@ -249,57 +299,6 @@ class RomAnalyzer: subsystem_info["size"] = size subsystem_info["file_count"] = count - @classmethod - def analysis(cls, system_module_info_json: Text, product_dirs: List[str], - project_path: Text, product_name: Text, output_file: Text, output_execel: bool, add_baseline: bool, - unit_adapt: bool): - """ - system_module_info_json: json文件 - product_dirs:要处理的产物的路径列表如["vendor", "system/"] - project_path: 项目根路径 - product_name: eg,rk3568 - output_file: basename of output file - """ - project_path = BasicTool.get_abs_path(project_path) - rom_baseline_dict: Dict[str, Any] = RomRamBaselineCollector.collect( - project_path) - with open("rom_ram_baseline.json", 'w', encoding='utf-8') as f: - json.dump(rom_baseline_dict, f, indent=4) - phone_dir = os.path.join( - project_path, "out", product_name, "packages", "phone") - product_dirs = [os.path.join(phone_dir, d) for d in product_dirs] - pre_collector = PreCollector(project_path) - pre_collector.collect_sa_profile() - extra_product_info_dict: Dict[str, Dict] = pre_collector.result_dict - product_info_dict = cls.__collect_product_info( - system_module_info_json, project_path, - extra_info=extra_product_info_dict) # collect product info from json file - result_dict: Dict[Text:Dict] = dict() - for d in product_dirs: - file_list: List[Text] = BasicTool.find_all_files(d) - for f in file_list: - size = os.path.getsize(f) - relative_filepath = f.replace(phone_dir, "").lstrip(os.sep) - unit: Dict[Text, Any] = product_info_dict.get( - relative_filepath) - if not unit: - bf = f.split('/')[-1] - unit: Dict[Text, Any] = product_info_dict.get(bf) - if not unit: - unit = dict() - unit["size"] = size - unit["relative_filepath"] = relative_filepath - cls.__put(unit, result_dict, rom_baseline_dict, add_baseline) - output_dir, _ = os.path.split(output_file) - if len(output_dir) != 0: - os.makedirs(output_dir, exist_ok=True) - if unit_adapt: - cls.result_unit_adaptive(result_dict) - with open(output_file + ".json", 'w', encoding='utf-8') as f: - f.write(json.dumps(result_dict, indent=4)) - if output_execel: - cls.__save_result_as_excel(result_dict, output_file, add_baseline) - def get_args(): VERSION = 2.0
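
Note on the recurring file-write change in this patch: several plain open(path, 'w') calls that write result JSON files are replaced with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode=0o640), 'w', ...), so newly created output files get restrictive 0o640 permissions. Below is a minimal sketch of that pattern, factored into a hypothetical helper (dump_json_restricted is not part of the patch). Two properties of the underlying os.open call worth keeping in mind: the mode argument only takes effect when the file is created (and is subject to the process umask), and O_CREAT without O_TRUNC does not shorten an existing file, so a previous, longer run can leave stale trailing bytes. The sketch adds O_TRUNC for that reason; the patch itself opens with O_WRONLY | O_CREAT only.

    import json
    import os

    def dump_json_restricted(path: str, data) -> None:
        # Create the file with 0o640 permissions (applied only on creation,
        # subject to the umask). O_TRUNC is added here so re-running over an
        # existing, longer file does not leave stale bytes behind; the patch
        # itself uses O_WRONLY | O_CREAT without O_TRUNC.
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode=0o640)
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=4, ensure_ascii=False)

    # usage (file name illustrative):
    # dump_json_restricted("rom_ram_baseline.json", {"communication": {}})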
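For reference, RomRamBaselineCollector._put (moved below collect() in both the lite_small and standard variants) builds the nested baseline dict in the shape sketched below. The subsystem name, component name and size values are illustrative, not taken from the patch; only the three per-component keys ("rom", "ram", "bundle.json") come from the code.

    # Layout of the dict produced by _put() and dumped to rom_ram_baseline.json:
    rom_ram_baseline = {
        "communication": {                  # subsystem_name (illustrative)
            "dsoftbus": {                   # component_name (illustrative)
                "rom": "2048KB",            # rom baseline read from bundle.json
                "ram": "4096KB",            # ram baseline read from bundle.json
                "bundle.json": "foundation/communication/dsoftbus/bundle.json",
            }
        }
    }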
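The module docstring moved to the top of lite_small/src/rom_analysis.py describes the overall flow: first collect target information from BUILD.gn, then attribute each compiled product to a component (ohos_* templates via their subsystem_name/part_name fields and install_dir, native gn templates via bundle.json), and for products that still cannot be attributed, fuzzy-match the artifact name (e.g. libxxx searched as xxx across BUILD.gn) and filter weak matches with a threshold. The patch only states the fuzzy-match idea in that docstring; the sketch below is one assumed way such a matcher could look, with names, scoring and threshold all illustrative rather than the tool's actual logic.

    import difflib
    from typing import Dict, Optional

    def fuzzy_match_component(product: str, target_to_component: Dict[str, str],
                              threshold: float = 0.6) -> Optional[str]:
        # product: artifact file name, e.g. "libfoo.so"
        # target_to_component: gn target name -> component name, from scanned BUILD.gn
        stem = product.split(".")[0]
        if stem.startswith("lib"):
            stem = stem[len("lib"):]
        best_component, best_score = None, 0.0
        for target, component in target_to_component.items():
            score = difflib.SequenceMatcher(None, stem, target).ratio()
            if score > best_score:
                best_component, best_score = component, score
        # drop weak matches with a threshold, as the docstring suggests
        return best_component if best_score >= threshold else None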