diff --git a/test/scripts/email_config.yaml b/test/scripts/email_config.yaml index 63183d6d16706085c01197d848b1dd0a27303e0e..77a779d374bd56457f52577dcd3a8fd7b33e4dfd 100644 --- a/test/scripts/email_config.yaml +++ b/test/scripts/email_config.yaml @@ -11,15 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -sender_email_address : -auth_code : -receiver_list : -smtp_server: -smtp_port: +sender_email_address : +auth_code : +receiver_list : +smtp_server : +smtp_port : xts_report_file : ".\\auto_xts_test\\result\\summary_report.html" -sdk_report_file : "" +sdk_report_file : ".\\sdk_test\\sdk_test_report.html" perf_report_file : "" attatchment_files : - ".\\auto_xts_test\\result\\details_report.html" - ".\\auto_xts_test\\result\\failures_report.html" + - ".\\sdk_test\\sdk_test_report.html" + - ".\\sdk_test\\sdk_test_log.txt" diff --git a/test/scripts/sdk_test/config.yaml b/test/scripts/sdk_test/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffd306bf0439f30ca75ddd1af3c3ebf2345de3e2 --- /dev/null +++ b/test/scripts/sdk_test/config.yaml @@ -0,0 +1,122 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Description: configs for test suite + +# environment settings +deveco_path: D:\Software\Deveco-0602\DevEco Studio +deveco_sdk_path: D:\deveco-sdk\deveco-sdk-0602 +node_js_path: D:\Software\nodejs # The nodejs which is used in Deveco + +# output settings +output_html_file: ./sdk_test_report.html +log_file: ./sdk_test_log.txt + +# descriptions about test haps list +# each hap has the following attributes: +# -name: name of the hap +# -path: path of the hap project +# -type: type of the hap. Available values are: [stage, fa, js, compatible8] +# besides, you can also append attributes in [widget, ohosTest, error, exceed_length_error] +# -widget: indicates this hap has widget, which has widgets.abc in stage mode +# -error: indicates this hap needs to test whether compile error is correctly generated as well +# -exceed_length_error: indicates this hap needs to test whether compile error is correctly generated when path exceeds the limit +# -ohosTest: indicates this hap needs to compile ohosTest as well +# -build_path: path to build path, in form of list +# -cache_path: path to cache path, in form of list +# -output_hap_path: path of output hap(debug mode), in form of list +# -output_app_path: path of output app(release mode), in form of list +# -inc_modify_file: path of modified file in incremental build, in form of list +# -description: description about the hap, this attribute is not used yet. 
+haps: + # complex haps + - calendar: + name: Calendar + path: D:\haps\calendar + type: [stage] + build_path: + cache_path: + output_hap_path: + output_app_path: + inc_modify_file: + description: + # widget haps + - widgetdemo: + name: WidgetDemo + path: D:\haps\WidgetDemo + type: [stage, widget] + build_path: + cache_path: + output_hap_path: + output_app_path: + inc_modify_file: + description: + # IDE demo haps + - idedemo_00: + name: IdeStageDemoEmptyAbility + path: D:\sdk-test\DemoApplication_EmptyAbility + type: [stage, ohosTest, exceed_length_error, error] + build_path: [entry, build, default] + cache_path: [cache, default, default@CompileArkTS, esmodule] + output_hap_path: [outputs, default, entry-default-unsigned.hap] + output_app_path: [outputs, default, app, entry-default.hap] + inc_modify_file: [entry, src, main, ets, pages, Index.ets] + description: + - idedemo_01: + name: IdeFaDemoEmptyAbility + path: D:\sdk-test\DemoApplication_EmptyAbility_fa + type: [fa, ohosTest, exceed_length_error, error] + build_path: [entry, build, default] + cache_path: [cache, default, default@LegacyCompileArkTS, jsbundle] + output_hap_path: [outputs, default, entry-default-unsigned.hap] + output_app_path: [outputs, default, app, entry-default.hap] + inc_modify_file: [entry, src, main, ets, MainAbility, pages, index.ets] + description: + - idedemo_02: + name: IdeCompatible8DemoEmptyAbility + path: D:\sdk-test\DemoApplication_EmptyAbility_compatible8 + type: [compatible8, ohosTest, exceed_length_error, error] + build_path: [entry, build, default] + cache_path: [cache, default, default@LegacyCompileArkTS, jsbundle] + output_hap_path: [outputs, default, entry-default-unsigned.hap] + output_app_path: [outputs, default, app, entry-default.hap] + inc_modify_file: [entry, src, main, ets, MainAbility, pages, index.ets] + description: + - idedemo_03: + name: IdeJsDemoEmptyAbility + path: D:\sdk-test\DemoApplication_EmptyAbility_js + type: [js, ohosTest, exceed_length_error, error] + 
build_path: [entry, build, default] + cache_path: [cache, default, default@LegacyCompileJS, jsbundle] + output_hap_path: [outputs, default, entry-default-unsigned.hap] + output_app_path: [outputs, default, app, entry-default.hap] + inc_modify_file: [entry, src, main, js, MainAbility, pages, index, index.js] + description: + +# modifications for incremental compilation and other tests +patch_content: + patch_new_file_ets: + # This new file will be added to the same directory as 'inc_modify_file' specified in haps + name: test.ets + content: "export function a() {return 'a'}" + patch_new_file_js: + name: test.js + content: "export function a() {return 'a'}" + patch_lines_1: + head: "import {a} from './test'\n" + tail: "\n console.log(a.toString());\n" + patch_lines_2: + tail: "\n console.log('This is a new line');\n" + patch_lines_error: + tail: "\n let a_duplicated_value_for_test_suite = 1; function a_duplicated_value_for_test_suite() {};" + expected_error: [Duplicate identifier 'a_duplicated_value_for_test_suite', Identifier 'a_duplicated_value_for_test_suite' has already been declared] \ No newline at end of file diff --git a/test/scripts/sdk_test/entry.py b/test/scripts/sdk_test/entry.py new file mode 100644 index 0000000000000000000000000000000000000000..7f73a9afcda6e192e8ce86e12046871241cdf87d --- /dev/null +++ b/test/scripts/sdk_test/entry.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +""" +Copyright (c) 2023 Huawei Device Co., Ltd. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +Description: entry to run sdk test daily +""" + +import os +import subprocess +import time + +import utils + + +def run(): + test_start_time = time.strftime('%Y%m%d-%H%M%S') + sdk_url = utils.get_sdk_url() + + cmd = ['python3', 'run.py'] + cmd.extend(['--sdkPath', sdk_url]) + cmd.extend(['--hapMode', 'all']) + cmd.extend(['--compileMode', 'all']) + cmd.extend(['--logLevel', 'debug']) + cmd.extend(['--logFile', 'log' + '_' + test_start_time + '.txt']) + + current_dir = os.path.dirname(os.path.abspath(__file__)) + print(current_dir) + print(cmd) + process = subprocess.Popen(cmd, cwd=current_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + stdout, stderr = process.communicate(timeout=60 * 60 * 5) + stdout_utf8 = stdout.decode("utf-8", errors="ignore") + stderr_utf8 = stderr.decode("utf-8", errors="ignore") + print(f"cmd stdout: {stdout_utf8}") + print(f"cmd stderr: {stderr_utf8}") + + +if __name__ == '__main__': + run() diff --git a/test/scripts/sdk_test/execution.py b/test/scripts/sdk_test/execution.py new file mode 100644 index 0000000000000000000000000000000000000000..39b0f1422afb051daa1f899b1b7c31f343a880d1 --- /dev/null +++ b/test/scripts/sdk_test/execution.py @@ -0,0 +1,954 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +""" +Copyright (c) 2023 Huawei Device Co., Ltd. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Description: execute test tasks +""" + +import logging +import os +import re +import shutil +import signal +import subprocess +import zipfile + +import json5 + +import options +import utils + + +class IncrementalTest: + @staticmethod + def validate_module_name_change(task, inc_task, is_debug, stdout, stderr, new_module_name): + output_file = get_compile_output_file_path(task, is_debug) + output_dir = os.path.dirname(output_file) + output_file_name = os.path.basename(output_file) + output_file_name_items = output_file_name.split('-') # hap name format: entry-default.hap + output_file_name_items[0] = new_module_name + output_file_name = '-'.join(output_file_name_items) + new_module_name_output_file = os.path.join(output_dir, output_file_name) + + logging.debug(f"new module hap file: {new_module_name_output_file}") + + passed = validate(inc_task, task, is_debug, stdout, stderr, new_module_name_output_file) + logging.debug(f"validate new module hap file, passed {passed}") + if not passed: + return + + if is_debug: + inc_info = inc_task.debug_info + else: + inc_info = inc_task.release_info + uncompressed_output_file = new_module_name_output_file + '.uncompressed' + with zipfile.ZipFile(new_module_name_output_file, 'r') as zip_ref: + zip_ref.extractall(uncompressed_output_file) + + abc_path = os.path.join(uncompressed_output_file, 'ets') + modules_abc_path = os.path.join(abc_path, 'modules.abc') + modules_pa = disasm_abc(modules_abc_path) + if not modules_pa or not os.path.exists(modules_pa): + inc_info.result = options.TaskResult.failed + inc_info.error_message = f'ark_disasm failed, module name change not verified' + return + + func_str = '' + with open(modules_pa, 'r', encoding='utf-8') as pa: + line = pa.readline() + while line: + if '.function' in line.strip(): + func_str = line.strip() + break + line = pa.readline() + + func_define_items = func_str.split('.') + if not new_module_name in func_define_items: + inc_info.result = options.TaskResult.failed + 
inc_info.error_message = f'expected entry name {new_module_name} in function name, \ + actual function name: {func_str}' + + shutil.rmtree(uncompressed_output_file) + + @staticmethod + def is_file_in_modified_files(task_type, backup_file_relative_path, modified_cache_files): + if 'stage' in task_type: + return backup_file_relative_path in modified_cache_files + else: + non_temporary_path = backup_file_relative_path.split("temporary")[1].lstrip(os.path.sep) + logging.debug(f"non_temporary_path: {non_temporary_path}") + for file in modified_cache_files: + logging.debug(f"modified_cache_files file: {file}") + if non_temporary_path in file: + return True + return False + + @staticmethod + def validate_compile_incremental_file(task, inc_task, is_debug, modified_files): + cache_extension = '' + if 'stage' in task.type: + cache_extention = '.protoBin' + elif 'fa' in task.type or 'compatible8' in task.type: + cache_extention = '.temp.abc' + elif 'js' in task.type: + cache_extention = '.abc' + + modified_cache_files = [] + # modified_files is a list of file with relative path to .../debug/release + for file in modified_files: + name, ext = os.path.splitext(file) + modified_cache_files.append(name + cache_extention) + + logging.debug(f"modified_cache_files: {modified_cache_files}") + + if is_debug: + cache_path = os.path.join(task.path, *(task.build_path), *(task.cache_path), 'debug') + backup_path = task.backup_info.cache_debug + inc_info = inc_task.debug_info + else: + cache_path = os.path.join(task.path, *(task.build_path), *(task.cache_path), 'release') + backup_path = task.backup_info.cache_release + inc_info = inc_task.release_info + + for root, dirs, files in os.walk(cache_path): + for file in files: + if not file.endswith(cache_extention): + continue + file_absolute_path = os.path.join(root, file) + file_relative_path = os.path.relpath(file_absolute_path, cache_path) + backup_file = os.path.join(backup_path, file_relative_path) + + if not os.path.exists(backup_file): 
+ logging.debug(f"backup file not exits: {backup_file}") + continue + + if utils.is_file_timestamps_same(file_absolute_path, backup_file): + continue + + logging.debug(f"found file ${file_relative_path} changed") + is_file_in_list = IncrementalTest.is_file_in_modified_files( + task.type, file_relative_path, modified_cache_files) + logging.debug(f"is file in list: {is_file_in_list}") + if not is_file_in_list: + inc_info.result = options.TaskResult.failed + inc_info.error_message = f'Incremental compile found unexpected file timestamp changed. \ + Changed file: {file_relative_path}' + return + + @staticmethod + def prepare_incremental_task(task, test_name): + if test_name in task.incre_compilation_info: + inc_task = task.incre_compilation_info[test_name] + else: + inc_task = options.IncCompilationInfo() + inc_task.name = test_name + task.incre_compilation_info[test_name] = inc_task + return inc_task + + @staticmethod + def compile_incremental_no_modify(task, is_debug): + test_name = 'no_change' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + [stdout, stderr] = compile_project(task, is_debug) + passed = validate(inc_task, task, is_debug, stdout, stderr) + if passed: + IncrementalTest.validate_compile_incremental_file(task, inc_task, is_debug, []) + + @staticmethod + def compile_incremental_add_oneline(task, is_debug): + test_name = 'add_oneline' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + modify_file_item = task.inc_modify_file + modify_file = os.path.join(task.path, *modify_file_item) + modify_file_backup = modify_file + ".bak" + shutil.copyfile(modify_file, modify_file_backup) + + with open(modify_file, 'a', encoding='utf-8') as file: + file.write(options.configs.get('patch_content').get('patch_lines_2').get('tail')) + + [stdout, stderr] = compile_project(task, 
is_debug) + passed = validate(inc_task, task, is_debug, stdout, stderr) + if passed: + modified_files = [os.path.join(*modify_file_item)] + IncrementalTest.validate_compile_incremental_file(task, inc_task, is_debug, modified_files) + + shutil.move(modify_file_backup, modify_file) + + @staticmethod + def compile_incremental_add_file(task, is_debug): + test_name = 'add_file' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + modify_file_item = task.inc_modify_file + modify_file = os.path.join(task.path, *modify_file_item) + modify_file_backup = modify_file + ".bak" + shutil.copyfile(modify_file, modify_file_backup) + + modify_dir = os.path.dirname(modify_file) + if 'js' in task.type: + patch_content = options.configs.get('patch_content').get('patch_new_file_js') + new_file_name = patch_content.get('name') + new_file_content = patch_content.get('content') + else: + patch_content = options.configs.get('patch_content').get('patch_new_file_ets') + new_file_name = patch_content.get('name') + new_file_content = patch_content.get('content') + new_file = os.path.join(modify_dir, new_file_name) + + with open(new_file, 'w', encoding='utf-8') as file: + file.writelines(new_file_content) + + with open(modify_file, 'r+', encoding='utf-8') as file: + old_content = file.read() + file.seek(0) + patch_lines = options.configs.get('patch_content').get('patch_lines_1') + file.write(patch_lines.get('head')) + file.write(old_content) + file.write(patch_lines.get('tail')) + + [stdout, stderr] = compile_project(task, is_debug) + passed = validate(inc_task, task, is_debug, stdout, stderr) + if passed: + modified_files = [os.path.join(*modify_file_item)] + IncrementalTest.validate_compile_incremental_file(task, inc_task, is_debug, modified_files) + + shutil.move(modify_file_backup, modify_file) + os.remove(new_file) + + @staticmethod + def compile_incremental_delete_file(task, is_debug): + 
test_name = 'delete_file' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + # this test is after 'add_file', and in test 'add_file' already done remove file, + # so here just call compile + [stdout, stderr] = compile_project(task, is_debug) + passed = validate(inc_task, task, is_debug, stdout, stderr) + if passed: + modify_file_item = task.inc_modify_file + modified_files = [os.path.join(*modify_file_item)] + IncrementalTest.validate_compile_incremental_file(task, inc_task, is_debug, modified_files) + + @staticmethod + def compile_incremental_reverse_hap_mode(task, is_debug): + test_name = 'reverse_hap_mode' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + hap_mode = not is_debug + [stdout, stderr] = compile_project(task, hap_mode) + validate(inc_task, task, hap_mode, stdout, stderr) + + @staticmethod + def compile_incremental_modify_module_name(task, is_debug): + if 'stage' not in task.type: + return + + test_name = 'change_module_name' + inc_task = IncrementalTest.prepare_incremental_task(task, test_name) + + logging.info(f"==========> Running {test_name} for task: {task.name}") + # modify build-profile.json5 + profile_file = os.path.join(task.path, 'build-profile.json5') + profile_file_backup = profile_file + ".bak" + shutil.copyfile(profile_file, profile_file_backup) + + with open(profile_file, 'r') as file: + profile_data = json5.load(file) + new_module_name = "new_entry" + logging.debug(f"profile_data is: {profile_data}") + profile_data['modules'][0]['name'] = new_module_name + with open(profile_file, 'w') as file: + json5.dump(profile_data, file) + + # modify module.json5 for stage mode + entry_item = task.build_path[:-2] # to entry path + config_file_dir = os.path.join(task.path, *entry_item, 'src', 'main') + config_file = os.path.join(config_file_dir, 
'module.json5') + config_file_backup = config_file + ".bak" + shutil.copyfile(config_file, config_file_backup) + + with open(config_file, 'r') as file: + config_data = json5.load(file) + config_data['module']['name'] = new_module_name + with open(config_file, 'w') as file: + json5.dump(config_data, file) + + try: + [stdout, stderr] = compile_project(task, is_debug) + IncrementalTest.validate_module_name_change(task, inc_task, is_debug, stdout, stderr, new_module_name) + except Exception as e: + logging.exception(e) + finally: + shutil.move(profile_file_backup, profile_file) + shutil.move(config_file_backup, config_file) + + +class OtherTest: + @staticmethod + def is_abc_same_in_haps(hap_1, hap_2): + hap_1_abc_files = [] + hap_2_abc_files = [] + with zipfile.ZipFile(hap_1) as zf1, zipfile.ZipFile(hap_2) as zf2: + for file in zf1.namelist(): + if file.endswith('.abc'): + hap_1_abc_files.append(file) + for file in zf2.namelist(): + if file.endswith('.abc'): + hap_2_abc_files.append(file) + + hap_1_abc_files.sort() + hap_2_abc_files.sort() + + if len(hap_1_abc_files) != len(hap_2_abc_files): + return False + + for idx, abc_file in enumerate(hap_1_abc_files): + with zf1.open(abc_file) as f1, zf2.open(hap_2_abc_files[idx]) as f2: + data1 = f1.read() + data2 = f2.read() + if data1 != data2: + return False + + return True + + @staticmethod + def verify_binary_consistency(task): + test_name = 'binary_consistency' + test_info = options.CompilationInfo() + task.other_tests[test_name] = test_info + debug_consistency = True + release_consistency = True + + logging.info(f"==========> Running {test_name} for task: {task.name}") + if options.arguments.hap_mode in ['all', 'release']: + # will have at lease 1 output from full compile + if len(task.backup_info.output_release) == 1: + compile_project(task, False) + backup_compile_output(task, False) + + if len(task.backup_info.output_release) == 2: + release_consistency = 
OtherTest.is_abc_same_in_haps(task.backup_info.output_release[0], + task.backup_info.output_release[1]) + else: + release_consistency = False + logging.debug(f"release consistency: {release_consistency}") + + if options.arguments.hap_mode in ['all', 'debug']: + if len(task.backup_info.output_debug) == 1: + compile_project(task, True) + backup_compile_output(task, True) + + if len(task.backup_info.output_debug) == 2: + debug_consistency = OtherTest.is_abc_same_in_haps(task.backup_info.output_debug[0], + task.backup_info.output_debug[1]) + else: + debug_consistency = False + logging.debug(f"debug consistency: {debug_consistency}") + + if debug_consistency and release_consistency: + test_info.result = options.TaskResult.passed + else: + test_info.result = options.TaskResult.failed + + @staticmethod + def execute_break_compile(task, is_debug): + test_name = 'break_continue_compile' + test_info = options.CompilationInfo() + task.other_tests[test_name] = test_info + + logging.info(f"==========> Running {test_name} for task: {task.name}") + clean_compile(task) + cmd = get_hvigor_compile_cmd(task.path, is_debug) + logging.debug(f'cmd: {cmd}') + logging.debug(f"cmd execution path {task.path}") + process = subprocess.Popen(cmd, shell=False, cwd=task.path, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + for line in iter(process.stdout.readline, b''): + if b'CompileArkTS' in line: + logging.debug("terminate signal sent") + process.send_signal(signal.SIGTERM) + break + + [stdout, stderr] = process.communicate(timeout=options.arguments.compile_timeout) + + logging.debug("first compile: stdcout: %s", stdout.decode('utf-8', errors="ignore")) + logging.warning("first compile: stdcerr: %s", stderr.decode('utf-8', errors="ignore")) + + logging.debug("another compile") + [stdout, stderr] = compile_project(task, is_debug) + + [is_success, time_string] = is_compile_success(stdout) + if not is_success: + test_info.result = options.TaskResult.failed + test_info.error_message = 
stderr + else: + passed = validate_compile_output(test_info, task, is_debug) + if passed: + test_info.result = options.TaskResult.passed + + @staticmethod + def compile_full_with_error(task, is_debug): + test_name = 'compile_with_error' + test_info = options.CompilationInfo() + task.other_tests[test_name] = test_info + + logging.info(f"==========> Running {test_name} for task: {task.name}") + modify_file_item = task.inc_modify_file + modify_file = os.path.join(task.path, *modify_file_item) + modify_file_backup = modify_file + ".bak" + shutil.copyfile(modify_file, modify_file_backup) + + patch_lines_error = options.configs.get('patch_content').get('patch_lines_error') + with open(modify_file, 'a', encoding='utf-8') as file: + file.write(patch_lines_error.get('tail')) + + [stdout, stderr] = compile_project(task, is_debug) + expected_errors = patch_lines_error.get('expected_error') + + passed = False + for expected_error in expected_errors: + if expected_error in stderr: + passed = True + break + + if passed: + test_info.result = options.TaskResult.passed + else: + test_info.result = options.TaskResult.failed + test_info.error_message = f"expected error message: {expected_errors}, but got {stderr}" + + shutil.move(modify_file_backup, modify_file) + + @staticmethod + def compile_with_exceed_length(task, is_debug): + test_name = 'compile_with_exceed_length' + test_info = options.CompilationInfo() + task.other_tests[test_name] = test_info + + logging.info(f"==========> Running {test_name} for task: {task.name}") + # get build-profile.json5 + entry_item = task.build_path[:-2] # to entry path + profile_file = os.path.join(task.path, *entry_item, 'build-profile.json5') + profile_file_backup = profile_file + ".bak" + shutil.copyfile(profile_file, profile_file_backup) + + with open(profile_file, 'r') as file: + profile_data = json5.load(file) + + long_str = 'default1234567890123456789012345678901234567890123456789012345678901234567890123456789' + \ + 
'012345678901234567890123456789' + logging.debug("long_str: %s", long_str) + profile_data['targets'][0]['name'] = long_str + + with open(profile_file, 'w') as file: + json5.dump(profile_data, file) + + [stdout, stderr] = compile_project(task, is_debug) + expected_error_message = 'The length of path exceeds the maximum length: 259' + + if expected_error_message in stderr: + test_info.result = options.TaskResult.passed + else: + test_info.result = options.TaskResult.failed + test_info.error_message = f"expected error message: {expected_error_message}, but got {stderr}" + + shutil.move(profile_file_backup, profile_file) + + @staticmethod + def compile_ohos_test(task): + test_name = 'ohos_test' + test_info = options.CompilationInfo() + task.other_tests[test_name] = test_info + + logging.info(f"==========> Running {test_name} for task: {task.name}") + # ohosTest has only debug mode + cmd = [get_hvigor_path(task.path), '--mode', 'module', '-p', 'module=entry@ohosTest', 'assembleHap'] + [stdout, stderr] = compile_project(task, True, cmd) + [is_success, time_string] = is_compile_success(stdout) + if not is_success: + test_info.result = options.TaskResult.failed + test_info.error_message = stderr + else: + output_file = get_compile_output_file_path(task, True) + output_dir = os.path.dirname(output_file) + output_file_name = os.path.basename(output_file) + + ohos_test_str = 'ohosTest' + output_file_name_items = output_file_name.split('-') # hap name format: entry-default-signed.hap + output_file_name_items[-2] = ohos_test_str # ohosTest hap format: entry-ohosTest-signed.hap + output_file_name = '-'.join(output_file_name_items) + + output_dir_items = output_dir.split(os.path.sep) + output_dir_items[-1] = ohos_test_str + if utils.is_windows(): + # for windows, need to add an empty string to mark between disk identifier and path + output_dir_items.insert(1, os.path.sep) + ohos_test_output_file = os.path.join(*output_dir_items, output_file_name) + + passed = 
validate_compile_output(test_info, task, True, ohos_test_output_file) + if passed: + test_info.result = options.TaskResult.passed + + +def disasm_abc(abc_file): + sdk_path = options.configs.get('deveco_sdk_path') + ark_disasm_path = '' + if utils.is_windows(): + ark_disasm = 'ark_disasm.exe' + else: + ark_disasm = 'ark_disasm' + ## try to find ark_disasm in api 10, api 9 sequentially + ark_disasm_10_path = os.path.join(sdk_path, '10', 'toolchains', ark_disasm) + ark_disasm_9_path = os.path.join(sdk_path, '9', 'toolchains', ark_disasm) + if os.path.exists(ark_disasm_10_path): + ark_disasm_path = ark_disasm_10_path + elif os.path.exists(ark_disasm_9_path): + ark_disasm_path = ark_disasm_9_path + else: + logging.error("ark_disasm executable not found") + return '' + + pa_file = abc_file + '.pa' + cmd = [ark_disasm_path, '--verbose', abc_file, pa_file] + logging.debug(f'cmd: {cmd}') + process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + [stdout, stderr] = process.communicate(timeout=options.arguments.compile_timeout) + + logging.debug("disasm stdcout: %s", stdout.decode('utf-8', errors="ignore")) + logging.warning("disasm: stdcerr: %s", stderr.decode('utf-8', errors="ignore")) + + return pa_file + + +def is_abc_debug_info_correct(abc_file, is_debug): + pa_file = disasm_abc(abc_file) + if not os.path.exists(pa_file): + logging.error(f"pa file not exist: {pa_file}") + return False + + debug_info_block_str = 'LOCAL_VARIABLE_TABLE' + has_debug_info_block = False + with open(pa_file, 'r', encoding='utf-8') as pa: + line = pa.readline() + while line: + if debug_info_block_str in line.strip(): + has_debug_info_block = True + break + line = pa.readline() + + if is_debug: + return has_debug_info_block + else: + return not has_debug_info_block + + +def validate_output_for_jsbundle(info, task, uncompressed_output_path, is_debug): + abc_files = [] + for root, dirs, files in os.walk(uncompressed_output_path): + for file in files: + if 
file.endswith('.abc'): + abc_files.append(os.path.join(root, file)) + + total_size = 0 + for file in abc_files: + total_size += os.path.getsize(os.path.join(uncompressed_output_path, file)) + if 'compatible8' not in task.type and not is_abc_debug_info_correct(file, is_debug): + # skip compatible8 outputs as disasm may failed + info.result = options.TaskResult.failed + info.error_message = f"{file} debug info not correct" + return False + + if total_size == 0: + info.result = options.TaskResult.failed + info.error_message = "abc not found or abc size is 0" + return False + else: + info.abc_size = total_size + + if is_debug: + for file in abc_files: + sourcemap_file = file.replace('.abc', '.js.map') + if not os.path.exists(os.path.join(uncompressed_output_path, sourcemap_file)): + info.result = options.TaskResult.failed + info.error_message = "sourcemap not found" + return False + + return True + + +def validate_output_for_esmodule(info, task, uncompressed_output_path, is_debug): + abc_generated_path = os.path.join(uncompressed_output_path, 'ets') + + modules_abc_path = os.path.join(abc_generated_path, 'modules.abc') + if not os.path.exists(modules_abc_path): + info.result = options.TaskResult.failed + info.error_message = "modules.abc not found" + return False + + modules_abc_size = os.path.getsize(modules_abc_path) + if modules_abc_size <= 0: + info.result = options.TaskResult.failed + info.error_message = "modules.abc size is 0" + return False + if not is_abc_debug_info_correct(modules_abc_path, is_debug): + info.result = options.TaskResult.failed + info.error_message = "modules.abc debug info not correct" + return False + info.abc_size = modules_abc_size + + if 'widget' in task.type: + widget_abc_path = os.path.join(abc_generated_path, 'widgets.abc') + if not os.path.exists(widget_abc_path): + info.result = options.TaskResult.failed + info.error_message = "widgets.abc not found" + return False + + widgets_abc_size = os.path.getsize(widget_abc_path) + if 
widgets_abc_size <= 0: + info.result = options.TaskResult.failed + info.error_message = "widgets.abc size is 0" + return False + if not is_abc_debug_info_correct(widget_abc_path, is_debug): + info.result = options.TaskResult.failed + info.error_message = "widgets.abc debug info not correct" + return False + info.abc_size += widgets_abc_size + + if is_debug: + sourcemap_path = abc_generated_path + else: + sourcemap_path = os.path.join(task.path, *(task.build_path), *(task.cache_path), 'release') + sourcemap_file = os.path.join(sourcemap_path, 'sourceMaps.map') + if not os.path.exists(sourcemap_file): + info.result = options.TaskResult.failed + info.error_message = "sourcemap not found" + return False + + return True + + +def collect_compile_time(info, time_string): + time_min = 0.0 + time_second = 0.0 + time_millisecond = 0.0 + + time_items = time_string.split() + for idx, item in enumerate(time_items): + if item == 'min': + time_min = float(time_items[idx - 1]) * 60 + if item == 's': + time_second = float(time_items[idx - 1]) + if item == 'ms': + time_millisecond = round(float(time_items[idx - 1]) / 1000, 3) + + info.time = round(time_min + time_second + time_millisecond, 3) + + +def get_compile_output_file_path(task, is_debug): + output_file = '' + + if is_debug: + output_file = os.path.join(task.path, *(task.build_path), *(task.output_hap_path)) + else: + output_file = os.path.join(task.path, *(task.build_path), *(task.output_app_path)) + + return output_file + + +def validate_compile_output(info, task, is_debug, output_file=''): + passed = False + + if output_file == '': + output_file = get_compile_output_file_path(task, is_debug) + uncompressed_output_file = output_file + '.uncompressed' + + if not os.path.exists(output_file): + logging.error("output file for task %s not exists: %s", task.name, output_file) + passed = False + + info.result = options.TaskResult.failed + info.error_message = "Hap not found" + return passed + try: + with 
zipfile.ZipFile(output_file, 'r') as zip_ref: + zip_ref.extractall(uncompressed_output_file) + except Exception as e: + logging.error(f"unzip exception: {e}") + logging.error(f"uncompressed output file for task {task.name} failed. output file: {output_file}") + passed = False + + info.result = options.TaskResult.failed + info.error_message = "Hap uncompressed failed, cannot exam build products" + return passed + + if utils.is_esmodule(task.type): + passed = validate_output_for_esmodule(info, task, uncompressed_output_file, is_debug) + else: + passed = validate_output_for_jsbundle(info, task, uncompressed_output_file, is_debug) + + shutil.rmtree(uncompressed_output_file) + + return passed + + +def run_compile_output(info, task_path): + ## TODO: + # 1)install hap + # 2)run hap and verify + return False + + +def is_compile_success(compile_stdout): + pattern = r"BUILD SUCCESSFUL in (\d+ min )?(\d+ s )?(\d+ ms)?" + match_result = re.search(pattern, compile_stdout) + if not match_result: + return [False, ''] + + return [True, match_result.group(0)] + + +def validate(compilation_info, task, is_debug, stdout, stderr, output_file=''): + info = {} + if is_debug: + info = compilation_info.debug_info + else: + info = compilation_info.release_info + + # ret_code will be 1 if there's stderr, use "COMPILE SUCCESSFUL" as a flag to make a judge + [is_success, time_string] = is_compile_success(stdout) + if not is_success: + info.result = options.TaskResult.failed + info.error_message = stderr + return False + + passed = validate_compile_output(info, task, is_debug, output_file) + + if options.arguments.run_haps: + passed &= run_compile_output(info) + + if passed: + collect_compile_time(info, time_string) + info.result = options.TaskResult.passed + + return passed + + +def get_hvigor_path(project_path): + hvigor = '' + if utils.is_windows(): + hvigor = os.path.join(project_path, 'hvigorw.bat') + else: + hvigor = os.path.join(project_path, 'hvigorw') + return hvigor + + +def 
get_hvigor_compile_cmd(project_path, is_debug): + cmd = [get_hvigor_path(project_path)] + if is_debug: + cmd.append('assembleHap') + else: + cmd.append('assembleApp') + return cmd + + +def compile_project(task, is_debug, cmd=''): + if not cmd: + cmd = get_hvigor_compile_cmd(task.path, is_debug) + + logging.debug(f'cmd: {cmd}') + logging.debug(f"cmd execution path {task.path}") + process = subprocess.Popen(cmd, shell=False, cwd=task.path, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate(timeout=options.arguments.compile_timeout) + stdout_utf8 = stdout.decode("utf-8", errors="ignore") + stderr_utf8 = stderr.decode("utf-8", errors="ignore") + logging.debug(f"cmd stdout: {stdout_utf8}") + logging.debug(f"cmd stderr: {stderr_utf8}") + + return [stdout_utf8, stderr_utf8] + + +def clean_compile(task): + cmd = [get_hvigor_path(task.path), 'clean'] + logging.debug(f'cmd: {cmd}') + logging.debug(f"cmd execution path {task.path}") + process = subprocess.Popen(cmd, shell=False, cwd=task.path, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = process.communicate(timeout=options.arguments.compile_timeout) + + +def compile_incremental(task, is_debug): + logging.info(f"==========> Running task: {task.name} in incremental compilation") + [stdout, stderr] = compile_project(task, is_debug) + + [is_success, time_string] = is_compile_success(stdout) + if not is_success: + logging.error("Incremental compile failed due to first compile failed!") + return + + if options.arguments.compile_mode == 'incremental': + passed = validate(task.full_compilation_info, task, is_debug, stdout, stderr) + if not passed: + logging.error("Incremental compile failed due to first compile failed!") + return + + backup_compile_output(task, is_debug) + backup_compile_cache(task, is_debug) + + IncrementalTest.compile_incremental_no_modify(task, is_debug) + IncrementalTest.compile_incremental_add_oneline(task, is_debug) + 
IncrementalTest.compile_incremental_add_file(task, is_debug) + IncrementalTest.compile_incremental_delete_file(task, is_debug) + IncrementalTest.compile_incremental_reverse_hap_mode(task, is_debug) + IncrementalTest.compile_incremental_modify_module_name(task, is_debug) + + +def backup_compile_output(task, is_debug): + backup_path = task.backup_info.cache_path + if not os.path.exists(backup_path): + os.mkdir(backup_path) + + if is_debug: + if len(task.backup_info.output_debug) == 2: + return + + backup_output_path = os.path.join(backup_path, 'output', 'debug') + if not os.path.exists(backup_output_path): + os.makedirs(backup_output_path) + + else: + if len(task.backup_info.output_release) == 2: + return + + backup_output_path = os.path.join(backup_path, 'output', 'release') + if not os.path.exists(backup_output_path): + os.makedirs(backup_output_path) + + output_file = get_compile_output_file_path(task, is_debug) + shutil.copy(output_file, backup_output_path) + backup_output = os.path.join(backup_output_path, os.path.basename(output_file)) + backup_time_output = backup_output + '-' + utils.get_time_string() + shutil.move(backup_output, backup_time_output) + + if is_debug: + task.backup_info.output_debug.append(backup_time_output) + else: + task.backup_info.output_release.append(backup_time_output) + + +def backup_compile_cache(task, is_debug): + backup_path = task.backup_info.cache_path + if not os.path.exists(backup_path): + os.mkdir(backup_path) + + backup_cache_path = os.path.join(backup_path, 'cache') + if not os.path.exists(backup_cache_path): + os.mkdir(backup_cache_path) + cache_files = os.path.join(task.path, *(task.build_path), *(task.cache_path)) + + if is_debug: + if task.backup_info.cache_debug != '': + return + + cache_files = os.path.join(cache_files, 'debug') + backup_cache_file = os.path.join(backup_cache_path, 'debug') + shutil.copytree(cache_files, backup_cache_file) + task.backup_info.cache_debug = backup_cache_file + else: + if 
task.backup_info.cache_release != '': + return + + cache_files = os.path.join(cache_files, 'release') + backup_cache_file = os.path.join(backup_cache_path, 'release') + shutil.copytree(cache_files, backup_cache_file) + task.backup_info.cache_release = backup_cache_file + + +def execute_full_compile(task): + logging.info(f"==========> Running task: {task.name} in full compilation") + clean_compile(task) + passed = False + if options.arguments.hap_mode in ['all', 'release']: + [stdout, stderr] = compile_project(task, False) + passed = validate(task.full_compilation_info, task, False, stdout, stderr) + if passed: + backup_compile_output(task, False) + clean_compile(task) + if options.arguments.hap_mode in ['all', 'debug']: + [stdout, stderr] = compile_project(task, True) + passed = validate(task.full_compilation_info, task, True, stdout, stderr) + if passed: + backup_compile_output(task, True) + clean_compile(task) + + return passed + + +def execute_incremental_compile(task): + logging.info(f"==========> Running task: {task.name} in incremental compilation") + if options.arguments.hap_mode in ['all', 'release']: + compile_incremental(task, False) + if options.arguments.hap_mode in ['all', 'debug']: + compile_incremental(task, True) + clean_compile(task) + + +def clean_backup(task): + if os.path.exists(task.backup_info.cache_path): + shutil.rmtree(task.backup_info.cache_path) + return + + +def execute(test_tasks): + for task in test_tasks: + try: + logging.info(f"======> Running task: {task.name}") + if options.arguments.compile_mode in ['all', 'full']: + if not execute_full_compile(task): + logging.info("Full compile failed, skip other tests!") + continue + + if options.arguments.compile_mode in ['all', 'incremental']: + execute_incremental_compile(task) + + OtherTest.verify_binary_consistency(task) + + # for these tests, use one hapMode maybe enough + is_debug = True if options.arguments.hap_mode == 'debug' else False + OtherTest.execute_break_compile(task, is_debug) 
+ if 'error' in task.type: + OtherTest.compile_full_with_error(task, is_debug) + + if 'exceed_length_error' in task.type: + OtherTest.compile_with_exceed_length(task, is_debug) + + if 'ohosTest' in task.type: + OtherTest.compile_ohos_test(task) + + logging.info(f"======> Running task: {task.name} finised") + except Exception as e: + logging.exception(e) + finally: + clean_backup(task) diff --git a/test/scripts/sdk_test/options.py b/test/scripts/sdk_test/options.py new file mode 100644 index 0000000000000000000000000000000000000000..4d724f320381516369b82dacf3999b2f12836994 --- /dev/null +++ b/test/scripts/sdk_test/options.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +""" +Copyright (c) 2021 Huawei Device Co., Ltd. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Description: process options and configs for test suite +""" + +import argparse +import logging +import os +from enum import Enum + +import yaml + +from utils import init_logger + + +arguments = {} +configs = {} + + +class TaskResult(Enum): + undefind = 0 + passed = 1 + failed = 2 + + +class CompilationInfo: + def __init__(self): + self.result = TaskResult.undefind + self.error_message = '' + self.time = 0 + self.abc_size = 0 + + +class FullCompilationInfo: + def __init__(self): + self.debug_info = CompilationInfo() + self.release_info = CompilationInfo() + + +class IncCompilationInfo: + def __init__(self): + self.debug_info = CompilationInfo() + self.release_info = CompilationInfo() + self.name = '' + + +class BackupInfo: + def __init__(self): + self.cache_path = '' + self.cache_debug = '' + self.cache_release = '' + self.output_debug = [] + self.output_release = [] + + +class TestTask: + def __init__(self): + self.name = '' + self.path = '' + self.type = '' + self.build_path = [] + self.output_hap_path = '' + self.output_app_path = '' + self.inc_modify_file = [] + + self.full_compilation_info = FullCompilationInfo() + self.incre_compilation_info = {} + self.other_tests = {} + + self.backup_info = BackupInfo() + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--sdkPath', type=str, dest='sdk_path', default='', + help='specify sdk path if need to update sdk. 
Default to use sdk specify in config.yaml') + parser.add_argument('--buildMode', type=str, dest='build_mode', default='all', + choices=['all', 'assemble', 'preview', 'hotreload', 'hotfix'], + help='specify build mode') + parser.add_argument('--hapMode', type=str, dest='hap_mode', default='all', + choices=['all', 'debug', 'release'], + help='specify hap mode') + parser.add_argument('--compileMode', type=str, dest='compile_mode', default='all', + choices=['all', 'full', 'incremental'], + help='specify compile mode') + parser.add_argument('--testCase', type=str, dest='test_case', default='all', + choices=['all', 'fa', 'stage', 'compatible8', 'js'], + help='specify test cases') + parser.add_argument('--testHap', type=str, dest='test_hap', default='all', + help="specify test haps, option can be 'all' or a list of haps seperated by ','") + parser.add_argument('--imagePath', type=str, dest='image_path', default='', + help='specify image path if need to update rk/phone images. Default not to update image') + parser.add_argument('--runHaps', dest='run_haps', action='store_true', default=False, + help='specify whether to verify by running the haps on board.') + parser.add_argument('--logLevel', type=str, dest='log_level', default='error', + choices=['debug', 'info', 'warn', 'error'], + help='specify log level of test suite') + parser.add_argument('--logFile', type=str, dest='log_file', default='', + help='specify the file log outputs to, empty string will output to console') + parser.add_argument('--compileTimeout', type=int, dest='compile_timeout', default=1800, + help='specify deveco compilation timeout') + global arguments + arguments = parser.parse_args() + + +def parse_configs(): + config_yaml = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml') + with open(config_yaml, 'r') as config_file: + global configs + configs = yaml.safe_load(config_file) + + +def create_test_tasks(): + task_list = [] + haps_list = configs.get('haps') + test_cases = 'all' if 
arguments.test_case == 'all' else [] + test_haps = 'all' if arguments.test_hap == 'all' else [] + if test_cases != 'all': + test_cases = arguments.test_case.split(',') + if test_haps != 'all': + test_haps = arguments.test_hap.split(',') + + for hap in haps_list: + if test_cases == 'all' or test_haps == 'all' \ + or (test_cases and (hap['type'][0] in test_cases)) \ + or (test_haps and (hap['name'] in test_haps)): + if not os.path.exists(hap['path']): + logging.warning("Path of hap %s dosen't exist: %s", hap['name'], hap['path']) + continue + task = TestTask() + task.name = hap['name'] + task.path = hap['path'] + task.type = hap['type'] + task.build_path = hap['build_path'] + task.cache_path = hap['cache_path'] + task.output_hap_path = hap['output_hap_path'] + task.output_app_path = hap['output_app_path'] + task.inc_modify_file = hap['inc_modify_file'] + task.backup_info.cache_path = os.path.join(task.path, 'test_suite_cache') + + task_list.append(task) + + return task_list + + +def process_options(): + parse_args() + init_logger(arguments.log_level, arguments.log_file) + parse_configs() + return create_test_tasks() \ No newline at end of file diff --git a/test/scripts/sdk_test/preparation.py b/test/scripts/sdk_test/preparation.py new file mode 100644 index 0000000000000000000000000000000000000000..d22cf361004e987d22d8bac927086a076e87c154 --- /dev/null +++ b/test/scripts/sdk_test/preparation.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +""" +Copyright (c) 2023 Huawei Device Co., Ltd. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +Description: prepare environment for test +""" + +import logging +import os +import shutil +import sys +import tarfile +import zipfile + +import validators + +import options +from utils import is_linux, is_mac, is_windows, get_time_string, get_api_version, npm_install, check_gzip_file, download + + +def setup_env(): + old_env = os.environ.copy() + old_env_path = old_env['PATH'] + + java_home = os.path.join(options.configs.get('deveco_path'), 'jbr') + node_js_path = options.configs.get('node_js_path') + java_path = os.path.join(java_home, 'bin') + + os.environ['PATH'] = os.pathsep.join([java_path, node_js_path]) + os.pathsep + old_env_path + os.environ['JAVA_HOME'] = java_home + + +def check_deveco_env(): + if is_linux(): + return False + + java_path = os.path.join(options.configs.get('deveco_path'), 'jbr') + if not os.path.exists(java_path): + logging.error("Java not found!") + return False + + if not os.path.exists(options.configs.get('node_js_path')): + logging.error("Node js not found!") + return False + + return True + + +def get_sdk_from_remote(sdk_url): + deveco_sdk_path = options.configs.get('deveco_sdk_path') + temp_floder = deveco_sdk_path + '_temp' + sdk_temp_file = os.path.join(temp_floder, 'ohos-sdk-full.tar.gz') + + if os.path.exists(temp_floder): + shutil.rmtree(temp_floder) + os.mkdir(temp_floder) + download(sdk_url, sdk_temp_file, 'ohos-sdk-full.tar.gz') + if not check_gzip_file(sdk_temp_file): + logging.error('The downloaded file is not a valid gzip file.') + return '', '' + with tarfile.open(sdk_temp_file, 'r:gz') as tar: + tar.extractall(temp_floder) + + sdk_floder = os.path.join(temp_floder, 'SDK') + for item in os.listdir(os.path.join(*[temp_floder, 'ohos-sdk', 'windows'])): + with zipfile.ZipFile(os.path.join(*[temp_floder, 'ohos-sdk', 'windows', item])) as zip_file: + zip_file.extractall(os.path.join(sdk_floder)) + + if not 
npm_install(os.path.join(*[sdk_floder, 'ets', 'build-tools', 'ets-loader'])) or \ + not npm_install(os.path.join(*[sdk_floder, 'js', 'build-tools', 'ace-loader'])): + return '', '' + + api_version = get_api_version(os.path.join(*[sdk_floder, 'ets', 'oh-uni-package.json'])) + return sdk_floder, api_version + + +def update_sdk_to_deveco(sdk_path, api_version): + if not api_version: + api_version = '9' + deveco_sdk_path = options.configs.get('deveco_sdk_path') + deveco_sdk_version_path = os.path.join(deveco_sdk_path, api_version) + if os.path.exists(deveco_sdk_version_path): + shutil.move(deveco_sdk_version_path, deveco_sdk_version_path + '-' + get_time_string()) + for item in os.listdir(sdk_path): + shutil.move(os.path.join(sdk_path, item), os.path.join(deveco_sdk_version_path, item)) + + +def prepare_sdk(): + sdk_arg = options.arguments.sdk_path + if sdk_arg == '': + return True # use the sdk specified in config.yaml + + api_version = '' + sdk_path = sdk_arg + if validators.url(sdk_arg): + sdk_path, api_version = get_sdk_from_remote(sdk_arg) + + if not sdk_path or not os.path.exists(sdk_path): + return False + + update_sdk_to_deveco(sdk_path, api_version) + return True + + +def prepare_image(): + if options.arguments.run_haps: + return True + + ## TODO: 1)download image, 2)flash image + + return True + + +def prepare_test_env(): + prepared = check_deveco_env() + setup_env() + prepared = prepared and prepare_sdk() and prepare_image() + return prepared diff --git a/test/scripts/sdk_test/readme.md b/test/scripts/sdk_test/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..7174c8a21b73b16be38cbc37eed1cb05df75db43 --- /dev/null +++ b/test/scripts/sdk_test/readme.md @@ -0,0 +1,26 @@ + +# SDK Test Suite Overview +This test suite can perform end-to-end SDK test verification. There are two ways to perform verification: +1) Verify if abc and sourcemap are generated in the compiled package. 
+2) Verify if the application of the compiled package can run normally (this feature is under development). + +## SDK Test Suite Usage +### Operating Environment +The SDK test automation script runs on Windows, Python 3.7 and above. The MAC version has not been verified yet. + +### Test Preparation +1. Ensure that Deveco is installed in the environment. +2. Install dependencies of the test suite: +`python3 -m pip install pyyaml validators requests httpx tqdm json5 pandas` +3. Modify the configuration file `config.yaml`, configure relevant parameters of Deveco and the test application. Detailed configuration instructions can be found in the file. + +### Running Tests +The test suite supports daily and manual runs. + +#### Daily Run +The daily run will download the SDK built on the current day from the trunk branch and use it to perform a full test verification. +Run the command: `python entry.py` + +#### Manual Run +Run the command: `python run.py` +By default, it will run all the test items. Optional parameters can be viewed through `--help`. \ No newline at end of file diff --git a/test/scripts/sdk_test/readme_zh.md b/test/scripts/sdk_test/readme_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..54885d08b42cf73849c76ff5ba89135c329a07f5 --- /dev/null +++ b/test/scripts/sdk_test/readme_zh.md @@ -0,0 +1,25 @@ +# SDK测试套说明 +本测试套可以执行端到端的SDK测试验证。 +验证方式有两种: +1) 验证编译打包的产物中,abc和sourcemap是否生成。 +2) 验证编译产物的应用是否可以正常运行(该功能在开发中)。 + +## SDK测试套使用 +### 运行环境 +SDK测试自动化脚本运行环境为windows,python3.7及以上。MAC版本的运行暂未验证。 +### 测试准备 +1. 确保环境中已安装Deveco +2. 安装测试套依赖: +`python3 -m pip install pyyaml validators requests httpx tqdm json5 pandas` +3. 
修改配置文件config.yaml,配置Deveco和测试应用的相关参数。各项配置说明详见该文件。 + +### 测试运行 +测试套支持daily运行和手动单次运行。 +#### daily运行 +daily运行将从主干分支下载当日构建的sdk,使用该sdk进行全量的测试项验证: +执行命令: +`python entry.py` +#### 手动运行 +执行命令: +`python run.py` +不带参数默认跑全量的测试项。可选参数可通过`--help`查看。 \ No newline at end of file diff --git a/test/scripts/sdk_test/result.py b/test/scripts/sdk_test/result.py new file mode 100644 index 0000000000000000000000000000000000000000..208287199005a7ae567ff585645e92dd0fc2fd36 --- /dev/null +++ b/test/scripts/sdk_test/result.py @@ -0,0 +1,376 @@ +#!/usr/bin/env python3 +# coding: utf-8 + +""" +Copyright (c) 2023 Huawei Device Co., Ltd. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Description: output test results +""" + +import copy +import logging +import os +import time + +import pandas + +import options + + +incremetal_compile_tests = ["no_change", + "add_oneline", + "add_file", + "delete_file", + "reverse_hap_mode", + "change_module_name" + ] + +other_tests = ["binary_consistency", + "break_continue_compile", + "compile_with_error", + "compile_with_exceed_length", + "ohos_test" + ] + + +class TestResult: + def __init__(self): + self.passed = [] + self.failed = [] + self.time = 0.0 + + +def print_result(test_result, test_tasks): + logging.info("========================================") + logging.info("Test finished. 
The result is as following:") + logging.info("=====> Summary") + logging.info("Total test number: %s, took time: %.3f s", len(test_tasks), test_result.time) + logging.info("Passed test number: %s", len(test_result.passed)) + logging.info("Failed test number: %s", len(test_result.failed)) + + logging.info("=====> Detail Information") + logging.info("-----") + idx = 1 + for task in test_tasks: + logging.info("task index: %d", idx) + idx = idx + 1 + logging.info("task name: %s", task.name) + logging.info("task type: %s", task.type) + # print full compile result + logging.info("--full compilation result:") + logging.info("debug: %s, abc_size(byte) %s, time(s) %s, error message: %s", + task.full_compilation_info.debug_info.result, + task.full_compilation_info.debug_info.abc_size, + task.full_compilation_info.debug_info.time, + task.full_compilation_info.debug_info.error_message) + logging.info("release: %s, abc_size(byte) %s, time(s) %s, error message: %s", + task.full_compilation_info.release_info.result, + task.full_compilation_info.release_info.abc_size, + task.full_compilation_info.release_info.time, + task.full_compilation_info.debug_info.error_message) + + # print incremental compile result + logging.info("--incremental compilation result:") + for inc_task in task.incre_compilation_info.values(): + logging.info("incre test: %s", inc_task.name) + logging.info("debug: %s, abc_size(byte) %s, time(s) %s, error message: %s", + inc_task.debug_info.result, + inc_task.debug_info.abc_size, + inc_task.debug_info.time, + inc_task.debug_info.error_message) + logging.info("release: %s, abc_size(byte) %s, time(s) %s, error message: %s", + inc_task.release_info.result, + inc_task.release_info.abc_size, + inc_task.release_info.time, + inc_task.release_info.error_message) + + # print other tests result + for name, task_info in task.other_tests.items(): + logging.info("--test name: %s", name) + logging.info("result: %s, error message: %s", + task_info.result, + 
task_info.error_message) + + logging.info("-----") + logging.info("========================================") + + +def is_full_compilation_passed(task_info): + if not options.arguments.compile_mode in ['all', 'full']: + return True + + passed_debug = True + passed_release = True + + if options.arguments.hap_mode in ['all', 'release']: + passed_release = task_info.release_info.result == options.TaskResult.passed + if options.arguments.hap_mode == ['all', 'debug']: + passed_debug = task_info.debug_info.result == options.TaskResult.passed + + return passed_debug and passed_release + + +def is_incremental_compilation_passed(task_info): + if not options.arguments.compile_mode in ['all', 'incremental']: + return True + + if len(task_info) == 0: + return False + + passed_debug = True + passed_release = True + for inc_task in task_info.values(): + if options.arguments.hap_mode in ['all', 'release']: + passed_release = passed_release and inc_task.release_info.result == options.TaskResult.passed + if options.arguments.hap_mode == ['all', 'debug']: + passed_debug = passed_debug and inc_task.debug_info.result == options.TaskResult.passed + + return passed_debug and passed_release + + +def is_task_passed(task): + passed = is_full_compilation_passed(task.full_compilation_info) and \ + is_incremental_compilation_passed(task.incre_compilation_info) + + for test in task.other_tests.values(): + passed = passed and (test.result == options.TaskResult.passed) + + return passed + + +def collect_result(test_result, test_tasks, start_time): + for task in test_tasks: + if not is_task_passed(task): + test_result.failed.append(task) + else: + test_result.passed.append(task) + + end_time = time.time() + test_result.time = round(end_time - start_time, 3) + + +def get_result_symbol(result_type): + if result_type == options.TaskResult.passed: + return '√' + elif result_type == options.TaskResult.failed: + return '×' + else: + return '-' + + +def generate_summary_data(test_result, test_tasks): + 
## collect summary data + passed_task_name_list = [] + for task in test_result.passed: + passed_task_name_list.append(task.name) + failed_task_name_list = [] + for task in test_result.failed: + failed_task_name_list.append(task.name) + + summary_data = { + 'Total Test Number': len(test_tasks), + 'Passed Test Number': len(test_result.passed), + 'Failed Test Number': len(test_result.failed), + 'Passed Tests': ','.join(passed_task_name_list), + 'Failed Tests': ','.join(failed_task_name_list), + 'Test Took Time(s)': test_result.time + } + + return summary_data + + +def generate_detail_data(test_tasks): + time_size_data = [] + result_data = [] + + idx = 0 + for task in test_tasks: + idx += 1 + task_time_size_data = { + 'Task Index': idx, + 'Task Name': task.name + } + task_result_data = copy.deepcopy(task_time_size_data) + task_result_data['Task Type'] = ','.join(task.type) + + full_compilation_debug = task.full_compilation_info.debug_info + full_compilation_release = task.full_compilation_info.release_info + task_time_size_data['[Full Compilation]\n[Debug]\n[Compilation Time(s)]'] = full_compilation_debug.time + task_time_size_data['[Full Compilation]\n[Release]\n[Compilation Time(s)]'] = full_compilation_release.time + task_result_data['[Debug]'] = get_result_symbol(full_compilation_debug.result) + task_result_data['[Release]'] = get_result_symbol(full_compilation_release.result) + + for test in incremetal_compile_tests: + debug_result = options.TaskResult.undefind + release_result = options.TaskResult.undefind + if test in task.incre_compilation_info.keys(): + inc_task_info = task.incre_compilation_info[test] + debug_result = inc_task_info.debug_info.result + release_result = inc_task_info.release_info.result + task_result_data[f'[Debug]\n{test}'] = get_result_symbol(debug_result) + task_result_data[f'[Release]\n{test}'] = get_result_symbol(release_result) + + if test == 'add_oneline': + debug_test_time = 0 + release_test_time = 0 + if test in 
task.incre_compilation_info.keys(): + inc_task_info = task.incre_compilation_info[test] + debug_test_time = inc_task_info.debug_info.time + release_test_time = inc_task_info.release_info.time + + task_time_size_data['[Incremental Compilation]\n[Debug]\n[Compilation Time(s)]'] = debug_test_time + task_time_size_data['[Incremental Compilation]\n[Release]\n[Compilation Time(s)]'] = release_test_time + + for test in other_tests: + result = options.TaskResult.undefind + if test in task.other_tests.keys(): + task_info = task.other_tests[test] + result = task_info.result + task_result_data[f'{test}'] = get_result_symbol(result) + + task_time_size_data['[Abc Size(byte)]\n[Debug]'] = full_compilation_debug.abc_size + task_time_size_data['[Abc Size(byte)]\n[Release]'] = full_compilation_release.abc_size + time_size_data.append(task_time_size_data) + result_data.append(task_result_data) + + detail_data = { + 'result_data': result_data, + 'time_size_data': time_size_data + } + return detail_data + + +def generate_data_html(summary_data, detail_data): + # summary table + key_value_pairs = [f'
+ Notes:
+ 1. Incremental compilation time refers to add-one line incremental compile.
+ 2. For details compile output or error message during compile, please refer to attachment of log file.
+ 3. For sdk commit tags, please refer to attachment of manifest file(to be added).
+
{test_part} run failed
' return content - with open(file_name, 'r') as f: + with open(file_name, 'r', encoding='utf-8') as f: content += f.read() return content - - + + def add_attachment(msg, file_list): for file in file_list: if os.path.exists(file): - with open(file, 'r') as f: - msg.add_attachment(f.read(), 'html', filename=os.path.basename(file)) + with open(file, 'r', encoding='utf-8') as f: + msg.add_attachment(f.read(), 'html', filename=os.path.basename(file)) def send_email(): @@ -55,12 +55,12 @@ def send_email(): perf_test = data["perf_report_file"] attachment_files = data["attatchment_files"] yl.close() - + msg = EmailMessage() msg['From'] = sender msg['To'] = ", ".join(receiver) msg['Subject'] = "Arkcompiler Test" - + html = "" dividing_line = '