diff --git a/config_checking/checkers/__init__.py b/config_checking/checkers/__init__.py
index ccb5bc2aafd72ea4d68e6a4bdb7bf5e62bc9703d..76787d1926bf05e1a1e6592b038db16b0214c8a6 100644
--- a/config_checking/checkers/__init__.py
+++ b/config_checking/checkers/__init__.py
@@ -4,6 +4,7 @@
 import config_checking.checkers.pip_checker
 import config_checking.checkers.checkpoint_checker
 import config_checking.checkers.dataset_checker
 import config_checking.checkers.weights_checker
+import config_checking.checkers.hyperparameter_checker
 
 from config_checking.checkers.base_checker import BaseChecker
diff --git a/config_checking/checkers/base_checker.py b/config_checking/checkers/base_checker.py
index e986b2623d06ab771f8f3f4789cabd2c8570c5b7..1676b0ad36a090a7a445aab511126151d277db13 100644
--- a/config_checking/checkers/base_checker.py
+++ b/config_checking/checkers/base_checker.py
@@ -9,6 +9,7 @@ class PackInput:
         self.ckpt_path = config_dict.get("ckpt path", None)
         self.need_env_args = config_dict.get("env args", None)
         self.need_pip_data = config_dict.get("pip data", None)
+        self.model_paths = config_dict.get("model_paths", None)
         self.output_zip_path = config_dict.get("output zip path", "./config_check_pack.zip")
         self.model = model
 
diff --git a/config_checking/checkers/hyperparameter_checker.py b/config_checking/checkers/hyperparameter_checker.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ca1a2f60328fd072d81b6695e5667f569dd0765
--- /dev/null
+++ b/config_checking/checkers/hyperparameter_checker.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2024-2024, Huawei Technologies Co., Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
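+
+"""Hyperparameter checker.
+
+Extracts training hyperparameters from shell launch scripts, packs them into
+the comparison zip as JSON files, and reports differences between benchmark
+and comparison runs using fuzzy parameter-name matching.
+"""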
+
+import os
+import re
+import json
+import tempfile
+from difflib import SequenceMatcher
+from typing import Union, List, Dict, Any
+
+from msprobe.pytorch.config_checking.checkers.base_checker import BaseChecker
+from msprobe.pytorch.config_checking.config_checker import register_checker_item
+from msprobe.pytorch.config_checking.utils.packing import add_file_to_zip
+from msprobe.pytorch.config_checking.utils.utils import load_json, compare_dict, write_list_to_file
+from msprobe.pytorch.config_checking.utils.utils import config_checking_print
+
+
+@register_checker_item("hyperparameter")
+class HyperparameterChecker(BaseChecker):
+    input_needed = "shell_path"
+    target_name_in_zip = "hyperparameters"
+    result_filename = "hyperparameter_diff.txt"
+
+    PARAMETER_NAME_MAPPING = {
+        "learning_rate": ["lr", "learningrate"],
+        "batch_size": ["batch", "bs", "batch_size_per_gpu"],
+        "epochs": ["num_epochs", "max_epochs", "epoch"],
+        "weight_decay": ["wd", "weightdecay"],
+        "dropout_rate": ["dropout", "drop_rate"],
+    }
+
+    @staticmethod
+    def pack(pack_input):
+        shell_path = pack_input.shell_path
+        output_zip_path = pack_input.output_zip_path
+
+        if not isinstance(shell_path, list):
+            raise TypeError("shell_path should be a list of file paths.")
+
+        for script_path in shell_path:
+            if os.path.isfile(script_path):
+                hyperparameters = HyperparameterChecker._extract_hyperparameters_from_script(script_path)
+                if hyperparameters:
+                    dest_path_in_zip = os.path.join(HyperparameterChecker.target_name_in_zip,
+                                                    os.path.splitext(os.path.basename(script_path))[0] + ".json")
+                    # Dump the extracted parameters to a temporary JSON file, add it to the
+                    # zip, then remove the temporary file.
+                    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as tmp_file:
+                        json.dump(hyperparameters, tmp_file, indent=4)
+                        tmp_file_path = tmp_file.name
+                    add_file_to_zip(output_zip_path, tmp_file_path, dest_path_in_zip)
+                    os.remove(tmp_file_path)
+                    config_checking_print(f"add {script_path} hyperparameters to zip")
+                else:
+                    config_checking_print(f"Warning: Failed to extract hyperparameters from script {script_path}")
+            else:
+                config_checking_print(f"Warning: Script path {script_path} is not a file.")
+
+    @staticmethod
+    def _extract_hyperparameters_from_script(script_path: str) -> Dict[str, Any]:
+        """
+        Extract arguments from a bash script used to launch model training.
+        """
+        hyperparameters = {}
+        with open(script_path, 'r') as file:
+            script_content = file.read()
+
+        command_line = re.search(r'torchrun\s+(.*?)\s*\|', script_content, re.DOTALL)
+        if command_line:
+            command_line = command_line.group(1)
+
+            # Expand FOO_ARGS="..." blocks referenced as $FOO_ARGS in the command line.
+            blocks = re.findall(r'(\w+_ARGS)="(.*?)"', script_content, re.DOTALL)
+            block_contents = {}
+            for block_name, block_content in blocks:
+                block_content = block_content.replace('\n', ' ')
+                block_contents[block_name] = block_content
+                command_line = command_line.replace(f"${block_name}", block_content)
+
+            matches = re.findall(r'--([\w-]+)(?:\s+([^\s\\]+))?', command_line)
+            for match in matches:
+                key, value = match
+                if value and value.startswith('$'):
+                    env_var = re.search(rf'{value[1:]}="?(.*?)"?\s', script_content)
+                    if env_var:
+                        value = env_var.group(1)
+                hyperparameters[key] = value if value else True
+
+        return hyperparameters
+
+    @staticmethod
+    def _fuzzy_match_parameter(param_name: str, available_params: Dict[str, Any]) -> Union[str, None]:
+        """
+        Fuzzy matches a parameter name against available parameter names
+        using predefined name mappings and string similarity.
+ """ + if param_name in available_params: + return param_name + + canonical_name = None + for standard_name, aliases in HyperparameterChecker.PARAMETER_NAME_MAPPING.items(): + if param_name == standard_name or param_name in aliases: + canonical_name = standard_name + break + + if canonical_name: + if canonical_name in available_params: + return canonical_name + for alias in HyperparameterChecker.PARAMETER_NAME_MAPPING[canonical_name]: + if alias in available_params: + config_checking_print(f"Matched '{param_name}' to alias '{alias}' via canonical name '{canonical_name}'") + return alias + + best_match_name = None + best_match_ratio = 0.8 + for available_param_name in available_params: + ratio = SequenceMatcher(None, param_name.lower(), available_param_name.lower()).ratio() + if ratio > best_match_ratio: + best_match_ratio = ratio + best_match_name = available_param_name + + if best_match_name: + config_checking_print(f"Fuzzy matched parameter '{param_name}' to '{best_match_name}' (similarity: {best_match_ratio:.2f})") + return best_match_name + + return None + + def compare(bench_dir, cmp_dir, output_path): + bench_model_dir = os.path.join(bench_dir, HyperparameterChecker.target_name_in_zip) + cmp_model_dir = os.path.join(cmp_dir, HyperparameterChecker.target_name_in_zip) + output_filepath = os.path.join(output_path, HyperparameterChecker.result_filename) + + bench_hyperparameters = {} + cmp_hyperparameters = {} + + if os.path.exists(bench_model_dir): + for root, _, files in os.walk(bench_model_dir): + for file in files: + if file.endswith('.json'): + filepath = os.path.join(root, file) + relative_filepath = os.path.relpath(filepath, bench_model_dir) + params = load_json(filepath) + if params: + bench_hyperparameters[relative_filepath] = params + + if os.path.exists(cmp_model_dir): + for root, _, files in os.walk(cmp_model_dir): + for file in files: + if file.endswith('.json'): + filepath = os.path.join(root, file) + relative_filepath = os.path.relpath(filepath, cmp_model_dir) + params = load_json(filepath) + if params: + cmp_hyperparameters[relative_filepath] = params + + all_diffs = [] + all_files = set(bench_hyperparameters.keys()) | set(cmp_hyperparameters.keys()) + + for filename in all_files: + bench_params = bench_hyperparameters.get(filename, None) + cmp_params = cmp_hyperparameters.get(filename, None) + + if bench_params is not None and cmp_params is not None: + file_diffs = [] + bench_param_names = set(bench_params.keys()) + cmp_param_names = set(cmp_params.keys()) + + for bench_param_name in bench_param_names: + matched_cmp_param_name = HyperparameterChecker._fuzzy_match_parameter(bench_param_name, cmp_params) + if matched_cmp_param_name: + bench_param_value = bench_params[bench_param_name] + cmp_param_value = cmp_params[matched_cmp_param_name] + if bench_param_value != cmp_param_value: + diff = compare_dict({bench_param_name: bench_param_value}, + {matched_cmp_param_name: cmp_param_value}) + if diff: + file_diffs.extend([f" Parameter '{bench_param_name}' (matched with '{matched_cmp_param_name}'): {d}" for d in diff]) + del cmp_params[matched_cmp_param_name] + else: + file_diffs.append(f" [Only in benchmark] Parameter: '{bench_param_name}': {bench_params[bench_param_name]}") + + for cmp_param_name, cmp_param_value in cmp_params.items(): + file_diffs.append(f" [Only in compare] Parameter: '{cmp_param_name}': {cmp_param_value}") + + if file_diffs: + all_diffs.append(f"File: {filename}") + all_diffs.extend(file_diffs) + + elif bench_params is not None: + all_diffs.append(f"[Only 
in benchmark] File: {filename}") + elif cmp_params is not None: + all_diffs.append(f"[Only in compare] File: {filename}") + + write_list_to_file(all_diffs, output_filepath) \ No newline at end of file
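
Note (not part of the patch): a minimal usage sketch of what _extract_hyperparameters_from_script is expected to return for a Megatron-style launch script. It assumes the patched msprobe package is importable; the script contents and values below are hypothetical.

import os
import tempfile

from msprobe.pytorch.config_checking.checkers.hyperparameter_checker import HyperparameterChecker

# Hypothetical launch script: one *_ARGS block plus a torchrun command piped
# into tee, which is the shape the extraction regexes expect.
SCRIPT = '''
GPT_ARGS="--num-layers 24
    --hidden-size 1024
    --lr 1.5e-4
    --micro-batch-size 4"

torchrun pretrain_gpt.py $GPT_ARGS | tee train.log
'''

with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as tmp:
    tmp.write(SCRIPT)
    script_path = tmp.name

params = HyperparameterChecker._extract_hyperparameters_from_script(script_path)
os.remove(script_path)
print(params)
# Expected, given the regexes in the patch:
# {'num-layers': '24', 'hidden-size': '1024', 'lr': '1.5e-4', 'micro-batch-size': '4'}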
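A second sketch shows how _fuzzy_match_parameter resolves names during compare: first an exact key, then a PARAMETER_NAME_MAPPING alias, then SequenceMatcher similarity above 0.8. The parameter names and values here are invented for illustration.

from msprobe.pytorch.config_checking.checkers.hyperparameter_checker import HyperparameterChecker

# Alias match: 'lr' resolves to the canonical name 'learning_rate', which is
# present on the comparison side.
print(HyperparameterChecker._fuzzy_match_parameter("lr", {"learning_rate": "1.5e-4"}))
# -> 'learning_rate'

# Similarity match: no predefined alias, but the SequenceMatcher ratio
# exceeds the 0.8 threshold.
print(HyperparameterChecker._fuzzy_match_parameter("warmup-steps", {"warmup_steps": "2000"}))
# -> 'warmup_steps'

# No match: returns None, so compare() reports the parameter as present on
# only one side.
print(HyperparameterChecker._fuzzy_match_parameter("seed", {"hidden-size": "1024"}))
# -> None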