diff --git a/OAT.xml b/OAT.xml
index 80623c1f6669ea19a7619575f2ce992d01e30773..631ca8bd9406411b1f79442490689894adc9ccd2 100644
--- a/OAT.xml
+++ b/OAT.xml
@@ -65,6 +65,8 @@ Note:If the text contains special characters, please escape them according to th
+
+
diff --git a/tools/components_deps/README.md b/tools/components_deps/README.md
index c5d0cdffc239e240cc2ab3e31e633461ab0b16bb..18c9a51c37e69df499b0e7671481f2d15ba5c19a 100644
--- a/tools/components_deps/README.md
+++ b/tools/components_deps/README.md
@@ -2,74 +2,65 @@
## Features
-Based on config.json and the build artifact out/{product_name}/build_configs/parts_info/parts_deps.json, analyze the dependencies between the required and optional components.
+Based on the config.json files under vendor, the open-source component set in the xml file, and the BUILD.gn files, analyze whether there are dependencies on closed-source components and whether there are unconditional dependencies on non-required components.
The results are stored in JSON format.
## Supported Products
-Mainly the rk3568 series; tested products include rk3568, rk3568_mini_system, pc_mini_system, tablet_mini_system and phone_mini_system
-
-## Implementation
-
-Reorganize the information already available in the product component definition file config.json and the build-generated out/{product_name}/build_configs/parts_info/parts_deps.json, and look up the paths of the relevant BUILD.gn files.
+The config.json files mainly cover the rk3568 series; the tested products include rk3568, rk3568_mini_system, pc_mini_system, tablet_mini_system and phone_mini_system
## Usage
Prerequisites:
-1. Obtain the entire components_deps directory
-1. Build the system
+1. Obtain the BUILD.gn files
+1. Obtain the xml file containing the open-source component set
+1. Obtain the config.json files containing the component set definitions
1. Python 3 or later
Command description:
1. Use `-h` or `--help` to show the help
```shell
- > python3 components_deps_analyzer.py -h
- usage: components_deps_analyzer.py [-h] -c CONFIG_JSON -d PARTS_DEPS_JSON [-s SINGLE_COMPONENT_NAME] [-o OUTPUT_FILE]
+ > python components_deps_analyzer.py --help
+  usage: components_deps_analyzer.py [-h] -p COMPONENTS_GN_PATH_LIST -g GN_COMPONENT -c CONFIG_PATH -o OPEN_COMPONENT_XML_PATH [-r RESULT_JSON_NAME]
- -s SINGLE_COMPONENT_NAME, --single_component_name SINGLE_COMPONENT_NAME
- single component name
- -o OUTPUT_FILE, --output_file OUTPUT_FILE
- eg: demo/components_deps
+ analyze components deps.
+
+ optional arguments:
+    -p COMPONENTS_GN_PATH_LIST, --components_gn_path_list COMPONENTS_GN_PATH_LIST
+                          comma-separated paths of the BUILD.gn files in the pr
+    -g GN_COMPONENT, --gn_component GN_COMPONENT
+                          comma-separated component names, one per gn file
+    -c CONFIG_PATH, --config_path CONFIG_PATH
+                          path of the directory containing config.json files
+    -o OPEN_COMPONENT_XML_PATH, --open_component_xml_path OPEN_COMPONENT_XML_PATH
+                          path of the xml file listing the open-source components
+    -r RESULT_JSON_NAME, --result_json_name RESULT_JSON_NAME
+                          name of the output json file (without extension)
```
1. Example
```shell
- python components_deps_analyzer.py -c vendor/hihope/rk3568/config.json -d out/rk3568/build_configs/parts_info/parts_deps.json -o components_dep -s ability_runtime
+    python components_deps_analyzer.py -p BUILD.gn,pkgs/BUILD.gn -g ace_engine,cef -c config_path -o ./gn_xml/ohos.xml
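+    # hypothetical: the same check with a custom output name, written to deps_check.json via -r
+    python components_deps_analyzer.py -p BUILD.gn -g ace_engine -c config_path -o ./gn_xml/ohos.xml -r deps_check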
```
-## Output Format (components_dep.json)
-
-```
-{
-    required component name: {
-        unconditionally depended optional component 1:
-            [BUILD.gn file path 1,
-            BUILD.gn file path 2,
-            ...],
-        unconditionally depended optional component 2:
-            [BUILD.gn file path 1,
-            BUILD.gn file path 2,
-            ...],
-        ...
-    }
-}
-```
-
-## Single-Component Output Format (ability_runtime.json)
+## Output Format (result.json)
```
-{
-    unconditionally depended optional component 1:
-        [BUILD.gn file path 1,
-        BUILD.gn file path 2,
-        ...],
-    unconditionally depended optional component 2:
-        [BUILD.gn file path 1,
-        BUILD.gn file path 2,
-        ...],
+[
+    {
+        "file_path": BUILD.gn file path,
+        "error": [
+            {
+                "line": line number,
+                "rule": triggered rule,
+                "detail": detailed description
+            },
+            ...
+        ]
+    },
    ...
-}
-```
+]
+```
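+
+For illustration, a single result entry might look like this (hypothetical values):
+
+```
+[
+    {
+        "file_path": "pkgs/BUILD.gn",
+        "error": [
+            {
+                "line": 42,
+                "rule": "depend optional component",
+                "detail": "depends on the non-required open-source component cef; please check the contents of deps"
+            }
+        ]
+    }
+]
+```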
diff --git a/tools/components_deps/components_deps_analyzer.py b/tools/components_deps/components_deps_analyzer.py
index 6c78456b9132c18b4b365e1d8c2faee1b300c60c..ffd001ba6a961f4af84c0a31b32850f31d76f4ef 100644
--- a/tools/components_deps/components_deps_analyzer.py
+++ b/tools/components_deps/components_deps_analyzer.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# This file contains the comparison between mandatory components and the actual compiled components.
+# This file provides a detection tool for unconditional dependencies of required components on optional components.
import argparse
import json
@@ -23,111 +23,131 @@ import re
class Analyzer:
@classmethod
- def __get_components(cls, config: str):
- mandatory_components = list()
- optional_components = list()
- with open(config, 'r', encoding='utf-8') as r:
- config_json = json.load(r)
- inherit = config_json['inherit']
- for json_name in inherit:
- with open(json_name, 'r', encoding='utf-8') as r:
- inherit_file = json.load(r)
- for subsystem in inherit_file['subsystems']:
- for component in subsystem['components']:
- mandatory_components.append(component['component'])
+ def __get_open_components(cls, xml_path):
+ open_components = list()
+ with open(xml_path, 'r', encoding='utf-8') as r:
+ xml_info = r.readlines()
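+        # each line with a path="..." attribute names a repository; its last path segment is taken as the open component name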
+ for line in xml_info:
+ if "path=" in line:
+ tmp = re.findall('path="(.*?)"', line)[0]
+ open_components.append(tmp.split('/')[-1])
+ return open_components
+
+ @classmethod
+ def __deal_config_json(cls, config_json):
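+        # flatten the component names declared by every subsystem in one config.json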
+ components = list()
for subsystem in config_json['subsystems']:
for component in subsystem['components']:
- if component not in mandatory_components:
- optional_components.append(component['component'])
- return mandatory_components, optional_components
+                if component['component'] not in components:
+                    components.append(component['component'])
+ return components
+
+ @classmethod
+ def __get_required_components(cls, config_path: str):
+ required_components = list()
+ files = os.listdir(config_path)
+ for file in files:
+ if file.endswith(".json"):
+ with open(os.path.join(config_path, file), 'r', encoding='utf-8') as r:
+ config_json = json.load(r)
+ required_components += cls.__deal_config_json(config_json)
+ return required_components
@classmethod
- def __get_gn_path(cls, parts_deps: str, mandatory: list):
- mandatory_gn_path = dict()
- with open(parts_deps, 'r', encoding='utf-8') as r:
- parts_deps_json = json.load(r)
- for component in parts_deps_json:
- if component in mandatory and parts_deps_json[component]:
- mandatory_gn_path[component] = '/'.join(
- parts_deps_json[component]['build_config_file'].split('/')[:-1])
- return mandatory_gn_path
+ def __get_line(cls, txt_list, key_words: str):
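+        # return the 1-based number of the first line containing key_words, or 0 if it never appears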
+        for i, line in enumerate(txt_list):
+            if key_words in line:
+                return i + 1
+ return 0
@classmethod
- def __judge_deps(cls, gn_path: str, optional_components):
+ def __judge_deps(cls, gn_path: str, open_components_list, optional_components):
+ error = list()
deps = list()
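+        # assume a closed-source dependency is possible until an open component name appears in the file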
+ dependent_close = True
with open(gn_path, 'r', encoding='utf-8') as r:
gn = r.readlines()
txt = ''
for line in gn:
- txt += line.strip()
- for optional_component in optional_components:
- dep_txt = re.findall('deps = \[(.*?)\]', txt) + re.findall('deps += \[(.*?)\]', txt)
+ txt += line
+        # gather the bodies of "if (...) { ... }" blocks; re.S is needed because txt keeps its newlines
+        key_txt = ' '.join(re.findall(r'if \(.+?\{(.*?)\}', txt, re.S))
+        for component in open_components_list:
+            if component in txt:
+                dependent_close = False
+                break
+        for i, line in enumerate(gn):
+            dep_txt = re.findall(r'deps = \[(.*?)\]', line) + re.findall(r'deps \+= \[(.*?)\]', line)
dep_info = list()
for info in dep_txt:
if '/' in info:
dep_info += re.findall('/(.*?):', info)
else:
dep_info += re.findall('"(.*?):', info)
- if optional_component in dep_info:
- key_txt = ' '.join(re.findall('if \(.+?\{(.*?)\}', txt))
- if optional_component not in key_txt:
- deps.append({'component': optional_component, 'gn_path': gn_path})
- return deps
+ for component in optional_components:
+ if component in dep_info and component not in key_txt:
+ deps.append((component, i + 1))
+        if dependent_close and 'deps =' in txt:
+ line = cls.__get_line(gn, 'deps =')
+ error.append(
+ {"line": line, "rule": "depend close component", "detail": "可能依赖闭源部件,请检查deps中的内容"})
+ for one_dep in deps:
+ error.append({"line": one_dep[1], "rule": "depend optional component",
+                          "detail": "depends on the non-required open-source component {}; please check the contents of deps".format(one_dep[0])})
+ return gn_path, error
@classmethod
- def __get_deps(cls, mandatory_gn: dict, optional_components_list: list):
- all_deps = dict()
- for component in mandatory_gn.keys():
- component_deps = list()
- total_deps = dict()
- for root, _, files in os.walk(mandatory_gn[component]):
- if 'BUILD.gn' in files:
- component_deps += cls.__judge_deps(os.path.join(root, 'BUILD.gn'), optional_components_list)
- for one_dep in component_deps:
- if one_dep['component'] not in total_deps.keys():
- total_deps[one_dep['component']] = [one_dep['gn_path']]
- else:
- total_deps[one_dep['component']].append(one_dep['gn_path'])
- all_deps[component] = total_deps
- return all_deps
-
- @classmethod
- def analysis(cls, config_path: str, parts_deps_path: str, output_file: str, single_component: str):
+ def analysis(cls, gn_path_list, gn_component, config_path: str, open_components_path, result_json_name: str):
if not os.path.exists(config_path):
print("error: {} is inaccessible or not found".format(config_path))
return
- if not os.path.exists(parts_deps_path):
- print("error: {} is inaccessible or not found".format(parts_deps_path))
+ if not os.path.exists(open_components_path):
+ print("error: {} is inaccessible or not found".format(open_components_path))
return
- mandatory_components, optional_components = cls.__get_components(config_path)
- mandatory_components_gn_path = cls.__get_gn_path(parts_deps_path, mandatory_components)
- deps = cls.__get_deps(mandatory_components_gn_path, optional_components)
- with os.fdopen(os.open(output_file + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), "w") as fd:
- json.dump(deps, fd, indent=4)
- if single_component != 'false' and single_component in deps.keys():
- with os.fdopen(os.open(single_component + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), "w") as fd:
- json.dump(deps[single_component], fd, indent=4)
+ if len(gn_path_list) != len(gn_component):
+ print(
+ "error: The component corresponding to the gn file and the gn file path are not in one-to-one correspondence.")
+ return
+ required_components = cls.__get_required_components(config_path)
+ open_components = cls.__get_open_components(open_components_path)
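+        # optional components are the open-source components that no config.json marks as required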
+ optional_components = list()
+ for components in open_components:
+ if components not in required_components:
+ optional_components.append(components)
+ result = list()
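+        # only gn files that belong to a required component are checked; others pass with an empty error list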
+ for i in range(len(gn_path_list)):
+ one_result = dict()
+ if gn_component[i] in required_components:
+ one_result["file_path"], one_result["error"] = cls.__judge_deps(gn_path_list[i], open_components,
+ optional_components)
+ else:
+ one_result["file_path"], one_result["error"] = gn_path_list[i], []
+ result.append(one_result)
+ with os.fdopen(os.open(result_json_name + ".json", os.O_WRONLY | os.O_CREAT, mode=0o640), "w",
+ encoding='utf-8') as fd:
+ json.dump(result, fd, indent=4, ensure_ascii=False)
def get_args():
parser = argparse.ArgumentParser(
description=f"analyze components deps.\n")
- parser.add_argument("-c", "--config_json", required=True, type=str,
- help="path of root path of openharmony/vendor/hihope/{product_name}/config.json")
- parser.add_argument("-d", "--parts_deps_json", required=True, type=str,
- help="path of out/{product_name}/build_configs/parts_info/parts_deps.json")
- parser.add_argument("-s", "--single_component_name", type=str, default="false",
- help="single component name")
- parser.add_argument("-o", "--output_file", type=str, default="components_deps",
- help="eg: demo/components_deps")
- args = parser.parse_args()
- return args
+ parser.add_argument("-p", "--components_gn_path_list", required=True, type=str,
+ help="path of pr BUILD.gn")
+ parser.add_argument("-g", "--gn_component", required=True, type=str,
+ help="gn file corresponding component")
+ parser.add_argument("-c", "--config_path", required=True, type=str,
+ help="path of config_file")
+ parser.add_argument("-o", "--open_component_xml_path", required=True, type=str,
+ help="open component name set")
+ parser.add_argument("-r", "--result_json_name", type=str, default="result",
+ help="name of output_json")
+ return parser.parse_args()
if __name__ == '__main__':
args = get_args()
- config_json_path = args.config_json
- parts_deps_json_path = args.parts_deps_json
- output_file_name = args.output_file
- single_component = args.single_component_name
- Analyzer.analysis(config_json_path, parts_deps_json_path, output_file_name, single_component)
\ No newline at end of file
+ gn_path_list = args.components_gn_path_list.split(',')
+ gn_component = args.gn_component.split(',')
+ config_path = args.config_path
+ open_components_xml_path = args.open_component_xml_path
+ result_json_name = args.result_json_name
+ Analyzer.analysis(gn_path_list, gn_component, config_path, open_components_xml_path, result_json_name)
\ No newline at end of file
diff --git a/tools/deps_guard/rules_checker/hdi_rule.py b/tools/deps_guard/rules_checker/hdi_rule.py
index 170170931c6053d5aac39a0bd2b1c5c208d40e40..a547064fd45bde23ed7afe34c29021e2a442973e 100755
--- a/tools/deps_guard/rules_checker/hdi_rule.py
+++ b/tools/deps_guard/rules_checker/hdi_rule.py
@@ -20,62 +20,73 @@ import json
from .base_rule import BaseRule
-class HdiRule(BaseRule):
- RULE_NAME = "NO-Depends-On-HDI"
-
- def __check_depends_on_hdi(self):
- lists = self.get_white_lists()
-
- passed = True
-
- hdi_without_shlib_type = []
- non_hdi_with_hdi_shlib_type = []
-
- # Check if any napi modules has dependedBy
- for mod in self.get_mgr().get_all():
- is_hdi = False
- if "hdiType" in mod and mod["hdiType"] == "hdi_service":
- is_hdi = True
- # Collect non HDI modules with shlib_type of value "hdi"
- if not is_hdi and ("shlib_type" in mod and mod["shlib_type"] == "hdi"):
- non_hdi_with_hdi_shlib_type.append(mod)
-
- # Collect HDI modules without shlib_type with value of "hdi"
- if is_hdi and ("shlib_type" not in mod or mod["shlib_type"] != "hdi"):
- if mod["name"] not in lists:
- hdi_without_shlib_type.append(mod)
-
- if not is_hdi:
- continue
-
- if len(mod["dependedBy"]) == 0:
- continue
- if mod["name"] in lists:
- continue
-
- # If hdi module has version_script to specify exported symbols, it can be depended by others
- if "version_script" in mod:
- continue
-
- # Check if HDI modules is depended by other modules
- self.error("hdi module %s depended by:" % mod["name"])
- for dep in mod["dependedBy"]:
- caller = dep["caller"]
- self.log(" module [%s] defined in [%s]" % (caller["name"], caller["labelPath"]))
- passed = False
-
- if len(hdi_without_shlib_type) > 0:
- for mod in hdi_without_shlib_type:
- if mod["name"] not in lists:
- passed = False
- self.error('hdi module %s has no shlib_type="hdi", add it in %s' % (mod["name"], mod["labelPath"]))
-
- if len(non_hdi_with_hdi_shlib_type) > 0:
- for mod in non_hdi_with_hdi_shlib_type:
- self.warn('non hdi module %s with shlib_type="hdi", %s' % (mod["name"], mod["labelPath"]))
-
- return passed
-
- def check(self):
- return self.__check_depends_on_hdi()
+class HdiRule(BaseRule):
+ RULE_NAME = "NO-Depends-On-HDI"
+
+ def __check_depends_on_hdi(self):
+ lists = self.get_white_lists()
+
+ passed = True
+
+ hdi_without_shlib_type = []
+ non_hdi_with_hdi_shlib_type = []
+
+    # Check whether any HDI modules are depended on by others
+ for mod in self.get_mgr().get_all():
+ is_hdi = False
+ if "hdiType" in mod and mod["hdiType"] == "hdi_service":
+ is_hdi = True
+            # Collect non-HDI modules whose shlib_type is "hdi"
+ if not is_hdi and ("shlib_type" in mod and mod["shlib_type"] == "hdi"):
+ non_hdi_with_hdi_shlib_type.append(mod)
+
+        # Collect HDI modules whose shlib_type is not "hdi"
+ if is_hdi and ("shlib_type" not in mod or mod["shlib_type"] != "hdi"):
+ if mod["name"] not in lists:
+ hdi_without_shlib_type.append(mod)
+
+ if self.__ignore_mod(mod, is_hdi, lists):
+ continue
+
+        # Check whether this HDI module is depended on by other modules
+ self.error("hdi module %s depended by:" % mod["name"])
+ for dep in mod["dependedBy"]:
+ caller = dep["caller"]
+ self.log(" module [%s] defined in [%s]" % (caller["name"], caller["labelPath"]))
+ passed = False
+
+ if len(hdi_without_shlib_type) > 0:
+ for mod in hdi_without_shlib_type:
+ if mod["name"] not in lists:
+ passed = False
+ self.error('hdi module %s has no shlib_type="hdi", add it in %s' % (mod["name"], mod["labelPath"]))
+
+ if len(non_hdi_with_hdi_shlib_type) > 0:
+ for mod in non_hdi_with_hdi_shlib_type:
+ self.warn('non hdi module %s with shlib_type="hdi", %s' % (mod["name"], mod["labelPath"]))
+
+ return passed
+
+ def check(self):
+ return self.__check_depends_on_hdi()
+
+    def __ignore_mod(self, mod, is_hdi, lists):
+        if not is_hdi:
+            return True
+        if len(mod["dependedBy"]) == 0:
+            return True
+        if mod["name"] in lists:
+            return True
+        # If an hdi module has a version_script to specify exported symbols, it can be depended on by others
+        if "version_script" in mod:
+            return True
+        return False
diff --git a/tools/rom_ram_analyzer/standard/pkgs/basic_tool.py b/tools/rom_ram_analyzer/standard/pkgs/basic_tool.py
index 73ae9524a85a7338e40c67cd8ca875b02341a2c7..955f4dfc767c25d05f77cd5cf23f0fadec59cecd 100644
--- a/tools/rom_ram_analyzer/standard/pkgs/basic_tool.py
+++ b/tools/rom_ram_analyzer/standard/pkgs/basic_tool.py
@@ -6,6 +6,7 @@ import re
from pathlib import Path
from typing import *
+
def unit_adaptive(size: int) -> str:
unit_list = ["Byte", "KB", "MB", "GB"]
index = 0
@@ -13,9 +14,10 @@ def unit_adaptive(size: int) -> str:
size /= 1024
index += 1
if index == len(unit_list):
- index = len(unit_list)-1
+ index = len(unit_list) - 1
size *= 1024
- return str(round(size,2))+unit_list[index]
+ return str(round(size, 2)) + unit_list[index]
+
class BasicTool:
@classmethod
@@ -34,7 +36,7 @@ class BasicTool:
ptrn = re.compile(ptrn, re.M | re.S)
result = re.finditer(ptrn, content)
return result
-
+
@classmethod
def find_all_files(cls, folder: str, real_path: bool = True, apply_abs: bool = True, de_duplicate: bool = True,
p_filter: typing.Callable = lambda x: True) -> list:
@@ -53,7 +55,7 @@ class BasicTool:
@classmethod
def get_abs_path(cls, path: str) -> str:
return os.path.abspath(os.path.expanduser(path))
-
+
@classmethod
def re_group_1(cls, content: str, pattern: str, **kwargs) -> str:
"""
@@ -69,9 +71,9 @@ class BasicTool:
if result:
return result.group(1)
return str()
-
+
@classmethod
- def execute(cls, cmd: str, post_processor: Callable[[Text], Text] = lambda x:x) -> Any:
+ def execute(cls, cmd: str, post_processor: Callable[[Text], Text] = lambda x: x) -> Any:
"""
Wrap popen and return the standard output as a list
:param post_processor: post-process the execution result
@@ -86,4 +88,4 @@ class BasicTool:
if __name__ == '__main__':
for i in BasicTool.find_all_files(".", apply_abs=False):
- print(i)
\ No newline at end of file
+ print(i)
diff --git a/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py b/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py
index 702d379dc44fc27c891921791cf24609e88c33a8..f4013e3fce316e5d98c14379a7182cdd1f45f28a 100644
--- a/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py
+++ b/tools/rom_ram_analyzer/standard/pkgs/gn_common_tool.py
@@ -105,7 +105,8 @@ class GnCommonTool:
return part_name, subsystem_name
@classmethod
- def _parse_part_subsystem(cls, part_var_flag: bool, subsystem_var_flag: bool, var_list: List[str], part_cmd: str, subsystem_cmd: str, gn_file: str, project_path: str) -> Tuple[str, str]:
+ def _parse_part_subsystem(cls, part_var_flag: bool, subsystem_var_flag: bool, var_list: List[str], part_cmd: str,
+ subsystem_cmd: str, gn_file: str, project_path: str) -> Tuple[str, str]:
part_name = subsystem_name = None
part = os.popen(part_cmd).read().strip()
if len(part) != 0:
@@ -169,6 +170,7 @@ class GnCommonTool:
subsystem_name = t_subsystem_name
return part_name, subsystem_name
+
class GnVariableParser:
@classmethod
def string_parser(cls, var: str, content: str) -> str:
diff --git a/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py b/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py
index af25f7169f4cc6a6614bd80fe3ae8fd390bc4aac..fc6d2f7dc11688c54b87b4035d6d37eb4f6f828f 100644
--- a/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py
+++ b/tools/rom_ram_analyzer/standard/pkgs/rom_ram_baseline_collector.py
@@ -28,8 +28,10 @@ import logging
class RomRamBaselineCollector:
"""collect baseline of rom and ram from bundle.json
"""
+
@classmethod
- def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str, bundle_path: str) -> None:
+ def _put(cls, result_dict: Dict, subsystem_name: str, component_name: str, rom_size: str, ram_size: str,
+ bundle_path: str) -> None:
if not result_dict.get(subsystem_name):
result_dict[subsystem_name] = dict()
result_dict[subsystem_name][component_name] = dict()
@@ -43,6 +45,7 @@ class RomRamBaselineCollector:
x = x.split("\n")
y = [item for item in x if item]
return y
+
bundle_list = BasicTool.execute(
cmd=f"find {oh_path} -name bundle.json", post_processor=post_handler)
rom_ram_baseline_dict: Dict[str, Dict] = dict()
diff --git a/tools/rom_ram_analyzer/standard/ram_analyzer.py b/tools/rom_ram_analyzer/standard/ram_analyzer.py
index 4c7851ca670120b0ba0d1bc05dee1cdc2d92cffa..1e3976089ae9e7a8ee0469bbe368732735deb90d 100644
--- a/tools/rom_ram_analyzer/standard/ram_analyzer.py
+++ b/tools/rom_ram_analyzer/standard/ram_analyzer.py
@@ -99,7 +99,7 @@ class RamAnalyzer:
@classmethod
def __parse_hidumper_mem(cls, content: typing.Text, device_num: str, ss: str = "Pss") -> typing.Dict[
- typing.Text, int]:
+ typing.Text, int]:
"""
Parse the output of hidumper --mem
Return a dict of the form {process_name: pss}
@@ -141,7 +141,7 @@ class RamAnalyzer:
continue
name = processed[1]  # otherwise take the name and its corresponding size
size = int(processed[cls.__ss_dict.get(ss)]) * \
- 1024 # kilo byte to byte
+ 1024 # kilo byte to byte
full_process_name = find_full_process_name(name)
if not full_process_name:
print(
@@ -213,7 +213,7 @@ class RamAnalyzer:
component_val_dict: typing.Dict[str, str] = sub_val_dict.get(
component_name)
delete_values_from_dict(component_val_dict, [
- "size", "file_count"])
+ "size", "file_count"])
for file_name, size in component_val_dict.items():
file_basename: str = os.path.split(file_name)[-1]
elf_info_dict[file_basename] = {
@@ -251,12 +251,11 @@ class RamAnalyzer:
@classmethod
def get_process_so_relationship(cls, cfg_path: str, profile_path: str) -> typing.Dict[
- str, typing.List[str]]:
+ str, typing.List[str]]:
"""
parse the relationship between process and elf file
"""
# collect from merged_sa
- # json_list = glob.glob(json_path + os.sep + "*[.]json", recursive=True)
process_elf_dict: typing.Dict[str, typing.List[str]] = dict()
cfg_list = glob.glob(cfg_path + os.sep + "*.cfg", recursive=True)
for cfg in cfg_list:
@@ -289,16 +288,17 @@ class RamAnalyzer:
}
}
"""
- #print('data_dict',data_dict)
tmp_dict = copy.deepcopy(data_dict)
writer = SimpleExcelWriter("ram_info")
header_unit = "" if unit_adapt else ", Byte"
- header = header = [
- "subsystem_name", "component_name", f"component_size(ram{header_unit})", "process_name", f"process_size({ss}{header_unit})", "elf", f"elf_size{'' if unit_adapt else '(Byte)'}"
+ header = [
+ "subsystem_name", "component_name", f"component_size(ram{header_unit})", "process_name",
+ f"process_size({ss}{header_unit})", "elf", f"elf_size{'' if unit_adapt else '(Byte)'}"
]
if baseline_file:
header = [
- "subsystem_name", "component_name", f"component_size(ram{header_unit})", "baseline", "process_name", f"process_size({ss}{header_unit})", "elf", f"elf_size{'' if unit_adapt else '(Byte)'}"
+ "subsystem_name", "component_name", f"component_size(ram{header_unit})", "baseline", "process_name",
+ f"process_size({ss}{header_unit})", "elf", f"elf_size{'' if unit_adapt else '(Byte)'}"
]
writer.set_sheet_header(header)
subsystem_c = 0
@@ -347,7 +347,7 @@ class RamAnalyzer:
process_start_r, process_c, process_end_r, process_c, process_name)
writer.write_merge(
process_start_r, process_size_c, process_end_r, process_size_c, process_size)
- process_start_r = process_end_r+1
+ process_start_r = process_end_r + 1
writer.write_merge(component_start_r, component_c,
component_end_r, component_c, component_name)
writer.write_merge(component_start_r, component_size_c,
@@ -358,7 +358,7 @@ class RamAnalyzer:
component_start_r = component_end_r + 1
writer.write_merge(subsystem_start_r, subsystem_c,
subsystem_end_r, subsystem_c, subsystem_name)
- subsystem_start_r = subsystem_end_r+1
+ subsystem_start_r = subsystem_end_r + 1
writer.save(filename)
@classmethod
@@ -382,7 +382,7 @@ class RamAnalyzer:
if cn == "size" or cn == "file_count":
continue
component_val_dict: typing.Dict[str,
- int] = sub_val_dict.get(cn)
+ int] = sub_val_dict.get(cn)
for k, v in component_val_dict.items():
if k == "size" or k == "file_count":
continue
@@ -406,6 +406,12 @@ class RamAnalyzer:
component_info["baseline"] = baseline_dict[subsystem_name][component_name].get(
"ram")
+ @classmethod
+ def inside_refactored_result_unit_adaptive(cls, process_info):
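+        # convert each elf size of one process entry to a human-readable unit string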
+ for elf_name, elf_size in process_info["elf"].items():
+ process_info["elf"][elf_name] = unit_adaptive(elf_size)
+ return process_info
+
@classmethod
def refactored_result_unit_adaptive(cls, result_dict: Dict[str, Dict]) -> None:
for subsystem_name, subsystem_info in result_dict.items():
@@ -417,14 +423,13 @@ class RamAnalyzer:
for process_name, process_info in component_info.items():
pro_size = unit_adaptive(process_info["size"])
del process_info["size"]
- for elf_name, elf_size in process_info["elf"].items():
- process_info["elf"][elf_name] = unit_adaptive(elf_size)
+ process_info = cls.inside_refactored_result_unit_adaptive(process_info)
process_info["size"] = pro_size
component_info["size"] = com_size
subsystem_info["size"] = sub_size
@classmethod
- def result_process1(cls,result_dict,process_name,process_size,elf,size):
+ def result_process1(cls, result_dict, process_name, process_size, elf, size):
result_dict[process_name] = dict()
result_dict[process_name]["size"] = process_size
result_dict[process_name]["startup"] = dict()
@@ -434,7 +439,7 @@ class RamAnalyzer:
return result_dict
@classmethod
- def result_process2(cls, result_dict, process_name, subsystem_name, process_size,component_name,hap_name, size):
+ def result_process2(cls, result_dict, process_name, subsystem_name, process_size, component_name, hap_name, size):
result_dict[process_name] = dict()
result_dict[process_name]["size"] = process_size
result_dict[process_name][subsystem_name] = dict()
@@ -453,7 +458,7 @@ class RamAnalyzer:
return result_dict
@classmethod
- def result_process4(cls, result_dict, process_size_dict, rom_result_dict, process_elf_dict,so_info_dict):
+ def result_process4(cls, result_dict, process_size_dict, rom_result_dict, process_elf_dict, so_info_dict):
def get(key: typing.Any, dt: typing.Dict[str, typing.Any]):
for k, v in dt.items():
if k.startswith(key) or (len(v) > 0 and key == v[0]):
@@ -468,26 +473,29 @@ class RamAnalyzer:
if process_name == "init":
_, elf, _, _, size = cls.find_elf_size_from_rom_result(process_name, "startup", "init",
lambda x, y: os.path.split(y)[
- -1].lower() == x.lower(),
+ -1].lower() == x.lower(),
rom_result_dict)
- result_dict=cls.result_process1(result_dict,process_name,process_size,elf,size)
+ result_dict = cls.result_process1(result_dict, process_name, process_size, elf, size)
continue
# if it is a hap, handle it specially
if (process_name.startswith("com.") or process_name.startswith("ohos.")):
- _, hap_name, subsystem_name, component_name, size = cls.find_elf_size_from_rom_result(process_name, "*", "*",
+ _, hap_name, subsystem_name, component_name, size = cls.find_elf_size_from_rom_result(process_name, "*",
+ "*",
lambda x, y: len(
y.split(
'/')) >= 3 and x.lower().startswith(
- y.split('/')[2].lower()),
+ y.split('/')[
+ 2].lower()),
rom_result_dict)
- result_dict=cls.result_process2(result_dict, process_name, subsystem_name, process_size,component_name,hap_name, size)
+ result_dict = cls.result_process2(result_dict, process_name, subsystem_name, process_size,
+ component_name, hap_name, size)
continue
# get the list of elf files related to this process
so_list: list = get(process_name, process_elf_dict)
if so_list is None:
print("warning: process '{}' not found in .json or .cfg".format(
process_name))
- result_dict=cls.result_process3(result_dict, process_name, process_size)
+ result_dict = cls.result_process3(result_dict, process_name, process_size)
continue
result_dict[process_name] = dict()
result_dict[process_name]["size"] = process_size
@@ -509,6 +517,7 @@ class RamAnalyzer:
result_dict[process_name][subsystem_name][component_name] = dict()
result_dict[process_name][subsystem_name][component_name][so] = so_size
return result_dict
+
@classmethod
def analysis(cls, cfg_path: str, json_path: str, rom_result_json: str, device_num: str,
output_file: str, ss: str, output_excel: bool, baseline_file: str, unit_adapt: bool):
@@ -532,7 +541,8 @@ class RamAnalyzer:
process_size_dict: typing.Dict[str, int] = cls.process_hidumper_info(
device_num, ss)
result_dict: typing.Dict[str, typing.Dict[str, typing.Any]] = dict()
- result_dict = cls.result_process4(result_dict, process_size_dict, rom_result_dict, process_elf_dict,so_info_dict)
+ result_dict = cls.result_process4(result_dict, process_size_dict, rom_result_dict, process_elf_dict,
+ so_info_dict)
base_dir, _ = os.path.split(output_file)
if len(base_dir) != 0 and not os.path.isdir(base_dir):
os.makedirs(base_dir, exist_ok=True)
@@ -550,6 +560,27 @@ class RamAnalyzer:
refactored_result, output_file + ".xls", ss, baseline_file, unit_adapt)
+def inside_refactor_result(component_info, refactored_ram_dict, subsystem_name, component_name, process_name,
+                           process_size):
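+    # merge one process's per-component elf sizes into the subsystem/component tree and accumulate size totals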
+ for elf_name, elf_size in component_info.items():
+ if not refactored_ram_dict.get(subsystem_name):
+ refactored_ram_dict[subsystem_name] = dict()
+ refactored_ram_dict[subsystem_name]["size"] = 0
+ if not refactored_ram_dict[subsystem_name].get(component_name):
+ refactored_ram_dict[subsystem_name][component_name] = dict(
+ )
+ refactored_ram_dict[subsystem_name][component_name]["size"] = 0
+ refactored_ram_dict[subsystem_name][component_name][process_name] = dict(
+ )
+ refactored_ram_dict[subsystem_name][component_name][process_name]["size"] = process_size
+ refactored_ram_dict[subsystem_name][component_name][process_name]["elf"] = dict(
+ )
+ refactored_ram_dict[subsystem_name][component_name][process_name]["elf"][elf_name] = elf_size
+ refactored_ram_dict[subsystem_name]["size"] += process_size
+ refactored_ram_dict[subsystem_name][component_name]["size"] += process_size
+ return refactored_ram_dict
+
+
def refacotr_result(ram_result: Dict[str, Dict]) -> Dict[str, Dict]:
refactored_ram_dict: Dict[str, Dict] = dict()
for process_name, process_info in ram_result.items():
@@ -557,22 +588,8 @@ def refacotr_result(ram_result: Dict[str, Dict]) -> Dict[str, Dict]:
del process_info["size"]
for subsystem_name, subsystem_info in process_info.items():
for component_name, component_info in subsystem_info.items():
- for elf_name, elf_size in component_info.items():
- if not refactored_ram_dict.get(subsystem_name):
- refactored_ram_dict[subsystem_name] = dict()
- refactored_ram_dict[subsystem_name]["size"] = 0
- if not refactored_ram_dict[subsystem_name].get(component_name):
- refactored_ram_dict[subsystem_name][component_name] = dict(
- )
- refactored_ram_dict[subsystem_name][component_name]["size"] = 0
- refactored_ram_dict[subsystem_name][component_name][process_name] = dict(
- )
- refactored_ram_dict[subsystem_name][component_name][process_name]["size"] = process_size
- refactored_ram_dict[subsystem_name][component_name][process_name]["elf"] = dict(
- )
- refactored_ram_dict[subsystem_name][component_name][process_name]["elf"][elf_name] = elf_size
- refactored_ram_dict[subsystem_name]["size"] += process_size
- refactored_ram_dict[subsystem_name][component_name]["size"] += process_size
+                refactored_ram_dict = inside_refactor_result(component_info, refactored_ram_dict, subsystem_name,
+                                                             component_name, process_name, process_size)
return refactored_ram_dict
@@ -615,8 +632,9 @@ if __name__ == '__main__':
rom_result = args.rom_result
device_num = args.device_num
output_filename = args.output_filename
- baseline_file = args.baseline_file
- output_excel = args.excel
- unit_adapt = args.unit_adaptive
+    baseline = args.baseline_file
+    output_excel_flag = args.excel
+    unit_adaptive_flag = args.unit_adaptive
RamAnalyzer.analysis(cfg_path, profile_path, rom_result,
- device_num=device_num, output_file=output_filename, ss="Pss", output_excel=output_excel, baseline_file=baseline_file, unit_adapt=unit_adapt)
\ No newline at end of file
+                         device_num=device_num, output_file=output_filename, ss="Pss", output_excel=output_excel_flag,
+                         baseline_file=baseline, unit_adapt=unit_adaptive_flag)
\ No newline at end of file
diff --git a/tools/rom_ram_analyzer/standard/rom_analyzer.py b/tools/rom_ram_analyzer/standard/rom_analyzer.py
index 87b08c461a7b7147bed352295dbb6546ed09df7f..76e2722a6abd8e65cea3e6fc4378fd270d335f44 100644
--- a/tools/rom_ram_analyzer/standard/rom_analyzer.py
+++ b/tools/rom_ram_analyzer/standard/rom_analyzer.py
@@ -109,14 +109,17 @@ class RomAnalyzer:
if label:
cs_flag = True
gn_path = os.path.join(project_path, label.split(':')[
- 0].lstrip('/'), "BUILD.gn")
+ 0].lstrip('/'), "BUILD.gn")
component_name = unit.get("part_name")
subsystem_name = unit.get("subsystem_name")
- if (not component_name) or (not subsystem_name):
+            if not component_name or not subsystem_name:
cn, sn = GnCommonTool.find_part_subsystem(
gn_path, project_path)
- component_name = cn if not component_name else component_name
- subsystem_name = sn if not subsystem_name else subsystem_name
+                if not component_name:
+                    component_name = cn
+                if not subsystem_name:
+                    subsystem_name = sn
else:
print("warning: keyword 'label' not found in {}".format(unit))
for target in dest:
@@ -126,14 +129,24 @@ class RomAnalyzer:
"subsystem_name": subsystem_name,
"gn_path": gn_path,
}
+ continue
+ tmp = target.split('/')[-1]
+ pre_info = extra_info.get(tmp)
+        if not pre_info:
+            continue
-        else:
-            tmp = target.split('/')[-1]
-            pre_info = extra_info.get(tmp)
-            if not pre_info:
-                continue
-            product_info_dict[target] = pre_info
+        product_info_dict[target] = pre_info
return product_info_dict
+ @classmethod
+ def __inside_save_result_as_excel(cls, add_baseline, subsystem_name, component_name,
+ baseline, file_name, size):
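+        # build one excel row; the baseline column is included only when add_baseline is set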
+        if add_baseline:
+            return [subsystem_name, component_name, baseline, file_name, size]
+        return [subsystem_name, component_name, file_name, size]
+
@classmethod
def __save_result_as_excel(cls, result_dict: dict, output_name: str, add_baseline: bool):
header = ["subsystem_name", "component_name",
@@ -173,10 +186,8 @@ class RomAnalyzer:
component_end_row += component_file_count
for file_name, size in component_dict.items():
- line = [subsystem_name, component_name, file_name, size]
- if add_baseline:
- line = [subsystem_name, component_name,
- baseline, file_name, size]
+ line = cls.__inside_save_result_as_excel(add_baseline, subsystem_name, component_name,
+ baseline, file_name, size)
excel_writer.append_line(line)
excel_writer.write_merge(component_start_row, component_col, component_end_row, component_col,
component_name)
@@ -190,7 +201,8 @@ class RomAnalyzer:
excel_writer.save(output_name + ".xls")
@classmethod
- def __put(cls, unit: typing.Dict[Text, Any], result_dict: typing.Dict[Text, Dict], baseline_dict: Dict[str, Any], add_baseline: bool):
+ def __put(cls, unit: typing.Dict[Text, Any], result_dict: typing.Dict[Text, Dict], baseline_dict: Dict[str, Any],
+ baseline: bool):
component_name = NOTFOUND if unit.get(
"component_name") is None else unit.get("component_name")
@@ -201,6 +213,7 @@ class RomAnalyzer:
if (not baseline_dict.get(subsystem_name)) or (not baseline_dict.get(subsystem_name).get(component_name)):
return str()
return baseline_dict.get(subsystem_name).get(component_name).get("rom")
+
size = unit.get("size")
relative_filepath = unit.get("relative_filepath")
if result_dict.get(subsystem_name) is None: # 子系统
@@ -212,7 +225,7 @@ class RomAnalyzer:
result_dict[subsystem_name][component_name] = dict()
result_dict[subsystem_name][component_name]["size"] = 0
result_dict[subsystem_name][component_name]["file_count"] = 0
- if add_baseline:
+ if baseline:
result_dict[subsystem_name][component_name]["baseline"] = get_rom_baseline(
)
@@ -238,7 +251,8 @@ class RomAnalyzer:
@classmethod
def analysis(cls, system_module_info_json: Text, product_dirs: List[str],
- project_path: Text, product_name: Text, output_file: Text, output_execel: bool, add_baseline: bool, unit_adapt: bool):
+ project_path: Text, product_name: Text, output_file: Text, output_execel: bool, add_baseline: bool,
+ unit_adapt: bool):
"""
system_module_info_json: json file
product_dirs: list of artifact directories to process, e.g. ["vendor", "system/"]
@@ -258,7 +272,8 @@ class RomAnalyzer:
pre_collector.collect_sa_profile()
extra_product_info_dict: Dict[str, Dict] = pre_collector.result_dict
product_info_dict = cls.__collect_product_info(
- system_module_info_json, project_path, extra_info=extra_product_info_dict) # collect product info from json file
+ system_module_info_json, project_path,
+ extra_info=extra_product_info_dict) # collect product info from json file
result_dict: Dict[Text, Dict] = dict()
for d in product_dirs:
file_list: List[Text] = BasicTool.find_all_files(d)
@@ -316,12 +331,12 @@ def get_args():
if __name__ == '__main__':
args = get_args()
module_info_json = args.module_info_json
- project_path = args.project_path
+ project_origin_path = args.project_path
product_name = args.product_name
product_dirs = args.product_dir
output_file = args.output_file
output_excel = args.excel
- add_baseline = args.baseline
- unit_adapt = args.unit_adaptive
+    add_baseline_flag = args.baseline
+    unit_adaptive_flag = args.unit_adaptive
RomAnalyzer.analysis(module_info_json, product_dirs,
- project_path, product_name, output_file, output_excel, add_baseline, unit_adapt)
\ No newline at end of file
+                         project_origin_path, product_name, output_file, output_excel, add_baseline_flag, unit_adaptive_flag)