diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dcce618318e7afbf3510da029bfabdb8e3df9483 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +*venv/ +**/__pycache__/ +**/*.pyc + +# build +**/build/ +**/dist/ +**/*.egg-info/ +**/*.so +**/*.o + +.env + +# ide +.idea + +# Cython +*.c diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000000000000000000000000000000000..1e3766022af21c4d3c650c4af9ed6dfe493d8e3b --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,17 @@ +{ + "python.languageServer": "Pylance", + "python.terminal.focusAfterLaunch": true, + "python.analysis.extraPaths": ["${workspaceFolder}/src"], + "python.analysis.typeCheckingMode": "standard", + "[python]": { + "editor.formatOnSave": false, + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.codeActionsOnSave": { + "source.fixAll": "never", + "source.organizeImports": "always" + } + }, + "pylint.cwd": "${workspaceFolder}/src", + "ruff.lint.enable": true, + "ruff.organizeImports": true +} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f6c26977bbb993b180afd759658dcf5ea6619cd0 --- /dev/null +++ b/LICENSE @@ -0,0 +1,194 @@ +木兰宽松许可证,第2版 + +木兰宽松许可证,第2版 + +2020年1月 http://license.coscl.org.cn/MulanPSL2 + +您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束: + +0. 定义 + +“软件” 是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。 + +“贡献” 是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。 + +“贡献者” 是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。 + +“法人实体” 是指提交贡献的机构及其“关联实体”。 + +“关联实体” 是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是 +指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。 + +1. 授予版权许可 + +每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可 +以复制、使用、修改、分发其“贡献”,不论修改与否。 + +2. 
授予专利许可 + +每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定 +撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡 +献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软 +件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“ +关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或 +其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权 +行动之日终止。 + +3. 无商标许可 + +“本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定 +的声明义务而必须使用除外。 + +4. 分发限制 + +您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“ +本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。 + +5. 免责声明与责任限制 + +“软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对 +任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于 +何种法律理论,即使其曾被建议有此种损失的可能性。 + +6. 语言 + +“本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文 +版为准。 + +条款结束 + +如何将木兰宽松许可证,第2版,应用到您的软件 + +如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步: + +1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字; + +2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中; + +3, 请将如下声明文本放入每个源文件的头部注释中。 + +Copyright (c) [Year] [name of copyright holder] +[Software Name] is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan +PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. + +Mulan Permissive Software License,Version 2 + +Mulan Permissive Software License,Version 2 (Mulan PSL v2) + +January 2020 http://license.coscl.org.cn/MulanPSL2 + +Your reproduction, use, modification and distribution of the Software shall +be subject to Mulan PSL v2 (this License) with the following terms and +conditions: + +0. Definition + +Software means the program and related documents which are licensed under +this License and comprise all Contribution(s). 
+ +Contribution means the copyrightable work licensed by a particular +Contributor under this License. + +Contributor means the Individual or Legal Entity who licenses its +copyrightable work under this License. + +Legal Entity means the entity making a Contribution and all its +Affiliates. + +Affiliates means entities that control, are controlled by, or are under +common control with the acting entity under this License, ‘control’ means +direct or indirect ownership of at least fifty percent (50%) of the voting +power, capital or other securities of controlled or commonly controlled +entity. + +1. Grant of Copyright License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to you a perpetual, worldwide, royalty-free, non-exclusive, +irrevocable copyright license to reproduce, use, modify, or distribute its +Contribution, with modification or not. + +2. Grant of Patent License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to you a perpetual, worldwide, royalty-free, non-exclusive, +irrevocable (except for revocation under this Section) patent license to +make, have made, use, offer for sale, sell, import or otherwise transfer its +Contribution, where such patent license is only limited to the patent claims +owned or controlled by such Contributor now or in future which will be +necessarily infringed by its Contribution alone, or by combination of the +Contribution with the Software to which the Contribution was contributed. +The patent license shall not apply to any modification of the Contribution, +and any other combination which includes the Contribution. 
If you or your +Affiliates directly or indirectly institute patent litigation (including a +cross claim or counterclaim in a litigation) or other patent enforcement +activities against any individual or entity by alleging that the Software or +any Contribution in it infringes patents, then any patent license granted to +you under this License for the Software shall terminate as of the date such +litigation or activity is filed or taken. + +3. No Trademark License + +No trademark license is granted to use the trade names, trademarks, service +marks, or product names of Contributor, except as required to fulfill notice +requirements in section 4. + +4. Distribution Restriction + +You may distribute the Software in any medium with or without modification, +whether in source or executable forms, provided that you provide recipients +with a copy of this License and retain copyright, patent, trademark and +disclaimer statements in the Software. + +5. Disclaimer of Warranty and Limitation of Liability + +THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY +KIND, EITHER EXPRESS OR IMPLIED. IN NO EVENT SHALL ANY CONTRIBUTOR OR +COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT +LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING +FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO +MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGES. + +6. Language + +THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION +AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF +DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION +SHALL PREVAIL. 
+ +END OF THE TERMS AND CONDITIONS + +How to Apply the Mulan Permissive Software License,Version 2 +(Mulan PSL v2) to Your Software + +To apply the Mulan PSL v2 to your work, for easy identification by +recipients, you are suggested to complete following three steps: + +i. Fill in the blanks in following statement, including insert your software +name, the year of the first publication of your software, and your name +identified as the copyright owner; + +ii. Create a file named "LICENSE" which contains the whole context of this +License in the first directory of your software package; + +iii. Attach the statement to the appropriate annotated syntax at the +beginning of each source file. + +Copyright (c) [Year] [name of copyright holder] +[Software Name] is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan +PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. diff --git a/distribution/build_rpm.sh b/distribution/build_rpm.sh new file mode 100644 index 0000000000000000000000000000000000000000..4fe2ddc8806d4f89679fc4e72e1fe823bc354d0e --- /dev/null +++ b/distribution/build_rpm.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Check if ~/rpmbuild directory exists; if not, run rpmdev-setuptree +if [ ! -d ~/rpmbuild ]; then + rpmdev-setuptree +fi + +# Run the Python script +python3 create_tarball.py + +# Find the generated tarball file and move it to ~/rpmbuild/SOURCES +generated_tarball=$(find . -maxdepth 1 -type f -name "*.tar.gz" -printf "%f\n") +mv "./$generated_tarball" ~/rpmbuild/SOURCES/ + +# Locate the spec file in the parent directory +spec_file="eulercopilot-cli.spec" + +if [[ ! 
-f "$spec_file" ]]; then + echo "Error: Could not find the spec file ($spec_file) in the parent directory." + exit 1 +fi + +# Remove old builds +rm -f ~/rpmbuild/RPMS/"$(uname -m)"/eulercopilot-cli-* + +# Build the RPM package using rpmbuild +rpmbuild --define "dist .oe2403" -bb "$spec_file" --nodebuginfo +# rpmbuild --define "_tag .a$(date +%s)" --define "dist .oe2203sp3" -bb "$spec_file" --nodebuginfo +# rpmbuild --define "_tag .beta3" --define "dist .oe2203sp3" -bb "$spec_file" --nodebuginfo +# rpmbuild --define "dist .oe2203sp3" -bb "$spec_file" --nodebuginfo diff --git a/distribution/create_tarball.py b/distribution/create_tarball.py new file mode 100644 index 0000000000000000000000000000000000000000..14923daff61d4372213cf0031b4aff7d30d41d90 --- /dev/null +++ b/distribution/create_tarball.py @@ -0,0 +1,70 @@ +import os +import re +import shutil +import tarfile + + +def extract_spec_fields(spec_file): + with open(spec_file, 'r', encoding='utf-8') as f: + content = f.read() + + name_pattern = re.compile(r'^Name:\s*(.+)$', re.MULTILINE) + version_pattern = re.compile(r'^Version:\s*(.+)$', re.MULTILINE) + + name_match = name_pattern.search(content) + version_match = version_pattern.search(content) + + if name_match and version_match: + return { + 'name': name_match.group(1).strip(), + 'version': version_match.group(1).strip() + } + else: + raise ValueError("Could not find Name or Version fields in the spec file") + + +def create_cache_folder(spec_info, src_dir): + name = spec_info['name'] + version = spec_info['version'] + + cache_folder_name = f"{name}-{version}" + cache_folder_path = os.path.join(os.path.dirname(src_dir), cache_folder_name) + + if not os.path.exists(cache_folder_path): + os.makedirs(cache_folder_path) + + copy_files(src_dir, cache_folder_path) + create_tarball(cache_folder_path, f"{cache_folder_name}.tar.gz") + delete_cache_folder(cache_folder_path) + + +def copy_files(src_dir, dst_dir): + for dirpath, _, files in os.walk(src_dir): + 
relative_path = os.path.relpath(dirpath, src_dir) + target_path = os.path.join(dst_dir, relative_path.strip(f'{os.curdir}{os.sep}')) + + if not os.path.exists(target_path): + os.makedirs(target_path) + + for file in files: + if file.endswith('.py') or file.endswith('.sh'): + src_file = os.path.join(dirpath, file) + dst_file = os.path.join(target_path, file) + os.link(src_file, dst_file) # 使用硬链接以节省空间和时间 + + +def create_tarball(folder_path, tarball_name): + with tarfile.open(tarball_name, "w:gz") as tar: + tar.add(folder_path, arcname=os.path.basename(folder_path)) + + +def delete_cache_folder(folder_path): + shutil.rmtree(folder_path) + + +if __name__ == "__main__": + SPEC_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "eulercopilot-cli.spec")) + SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")) + + info = extract_spec_fields(SPEC_FILE) + create_cache_folder(info, SRC_DIR) diff --git a/distribution/eulercopilot-cli.spec b/distribution/eulercopilot-cli.spec new file mode 100644 index 0000000000000000000000000000000000000000..b8ee515bb2216a5334df832aace8e964276f1882 --- /dev/null +++ b/distribution/eulercopilot-cli.spec @@ -0,0 +1,68 @@ +%global debug_package %{nil} + +Name: eulercopilot-cli +Version: 1.2.1 +Release: 4%{?_tag}%{?dist} +Group: Applications/Utilities +Summary: openEuler Copilot System Command Line Assistant +Source: %{name}-%{version}.tar.gz +License: MulanPSL-2.0 +URL: https://www.openeuler.org/zh/ + +BuildRequires: python3-devel python3-setuptools +BuildRequires: python3-pip +BuildRequires: python3-Cython gcc + +Requires: python3 jq hostname + +%description +openEuler Copilot System Command Line Assistant + +%prep +%setup -q +python3 -m venv .venv +.venv/bin/python3 -m pip install -U pip setuptools +.venv/bin/python3 -m pip install -U Cython pyinstaller +.venv/bin/python3 -m pip install -U websockets requests +.venv/bin/python3 -m pip install -U rich typer questionary + +%build +.venv/bin/python3 setup.py 
build_ext +.venv/bin/pyinstaller --onefile --clean \ + --distpath=%{_builddir}/%{name}-%{version}/dist \ + --workpath=%{_builddir}/%{name}-%{version}/build \ + copilot.py + +%install +%define _unpackaged_files_terminate_build 0 +install -d %{buildroot}/%{_bindir} +install -c -m 0755 %{_builddir}/%{name}-%{version}/dist/copilot %{buildroot}/%{_bindir} +install -d %{buildroot}/etc/profile.d +install -c -m 0755 %{_builddir}/%{name}-%{version}/eulercopilot.sh %{buildroot}/etc/profile.d + +%files +%defattr(-,root,root,-) +/usr/bin/copilot +/etc/profile.d/eulercopilot.sh + +%pre +sed -i '/# >>> eulercopilot >>>/,/# <<< eulercopilot <<</d' /etc/bashrc + +%post +cat << 'EOF' >> /etc/bashrc +# >>> eulercopilot >>> +if type revert_copilot_prompt &> /dev/null && type set_copilot_prompt &> /dev/null; then + run_after_return() { + if [[ "$PS1" == *"\[\033[1;33m"* ]]; then + revert_copilot_prompt + set_copilot_prompt + fi + } + PROMPT_COMMAND="${PROMPT_COMMAND:+${PROMPT_COMMAND}; }run_after_return" + set_copilot_prompt +fi +# <<< eulercopilot <<< +EOF + +%postun +if [ ! 
-f /usr/bin/copilot ]; then + sed -i '/# >>> eulercopilot >>>/,/# <<< eulercopilot << bool: + patterns = [ + # 重定向 + r'\>|\<|\>\>|\<<', + # 管道 + r'\|', + # 通配符 + r'\*|\?', + # 美元符号开头的环境变量 + r'\$[\w_]+', + # 历史展开 + r'!', + # 后台运行符号 + r'&', + # 分号 + r';', + # 括号命令分组 + r'\(|\)|\{|\}', + # 逻辑操作符 + r'&&|\|\|', + # Shell函数或变量赋值 + r'\b\w+\s*=\s*[^=\s]+' + ] + + for pattern in patterns: + if re.search(pattern, cmd): + return True + return False + + +def execute_shell_command(cmd: str) -> int: + '''Execute a shell command and exit.''' + if check_shell_features(cmd): + try: + process = subprocess.Popen(cmd, shell=True) + except ValueError as e: + print(i18n.main_exec_value_error.format(error=e)) + return 1 + else: + try: + process = subprocess.Popen(shlex.split(cmd)) + except FileNotFoundError as e: + builtin_cmds = ['.', 'source', 'history', 'cd', 'export', 'alias', 'test'] + cmd_prefix = cmd.split()[0] + if cmd_prefix in builtin_cmds: + print(i18n.main_exec_builtin_cmd.format(cmd_prefix=cmd_prefix)) + else: + print(i18n.main_exec_not_found_error.format(error=e)) + return 1 + exit_code = process.wait() + return exit_code + + +def print_shell_commands(cmds: list): + console = Console() + with Live(console=console, vertical_overflow='visible') as live: + live.update( + Panel( + Markdown( + '```bash\n' + '\n\n'.join(cmds) + '\n```', + code_theme='github-dark' + ), + border_style='gray50' + ) + ) + + +def command_interaction_loop(cmds: list, service: llm_service.LLMService) -> int: + if not cmds: + return -1 + print_shell_commands(cmds) + while True: + action = interact.select_action(len(cmds) > 1) + if action in ('execute_all', 'execute_selected', 'execute'): + exit_code: int = 0 + selected_cmds = get_selected_cmds(cmds, action) + if not selected_cmds: + return -1 + for cmd in selected_cmds: + exit_code = execute_shell_command(cmd) + if exit_code != 0: + print( + i18n.main_exec_cmd_failed_with_exit_code.format( + cmd=cmd, + exit_code=exit_code + ) + ) + break + return -1 + if 
action == 'explain': + service.explain_shell_command(select_one_cmd(cmds)) + elif action == 'edit': + i = select_one_cmd_with_index(cmds) + readline.set_startup_hook(lambda: readline.insert_text(cmds[i])) + try: + cmds[i] = input() + finally: + readline.set_startup_hook() + print_shell_commands(cmds) + elif action == 'cancel': + return -1 + + +def get_selected_cmds(cmds: list, action: str) -> list: + if action in ('execute', 'execute_all'): + return cmds + if action == 'execute_selected': + return interact.select_multiple_commands(cmds) + return [] + + +def select_one_cmd(cmds: list) -> str: + if len(cmds) == 1: + return cmds[0] + return interact.select_command(cmds) + + +def select_one_cmd_with_index(cmds: list) -> int: + if len(cmds) == 1: + return 0 + return interact.select_command_with_index(cmds) + + +def handle_user_input(service: llm_service.LLMService, + user_input: str, mode: str) -> int: + '''Process user input based on the given flag and backend configuration.''' + cmds: list = [] + if mode == 'chat': + cmds = service.get_shell_commands(user_input) + if isinstance(service, framework_api.Framework): + if mode == 'flow': + cmds = service.flow(user_input, selected_plugins) + if mode == 'diagnose': + cmds = service.diagnose(user_input) + if mode == 'tuning': + cmds = service.tuning(user_input) + if cmds: + return command_interaction_loop(cmds, service) + return -1 + + +def edit_config(): + console = Console() + with Live(console=console) as live: + live.update( + Panel(Markdown(config_to_markdown(), code_theme='github-dark'), + border_style='gray50')) + while True: + selected_entry = interact.select_settings_entry() + if selected_entry == 'cancel': + return + if selected_entry == 'backend': + backend = interact.select_backend() + if selected_entry != 'cancel': + update_config(selected_entry, backend) + elif selected_entry == 'query_mode': + backend = load_config().get('backend', '') + update_config(selected_entry, interact.select_query_mode(backend)) + elif 
selected_entry in ('advanced_mode', 'debug_mode'): + input_prompt = i18n.interact_question_yes_or_no.format( + question_body=CONFIG_ENTRY_NAME.get(selected_entry)) + update_config(selected_entry, interact.ask_boolean(input_prompt)) + else: + original_text: str = load_config().get(selected_entry, '') + new_text = '' + input_prompt = i18n.interact_question_input_text.format( + question_body=CONFIG_ENTRY_NAME.get(selected_entry)) + stylized_input_prompt = Text('❯ ', style='#005f87 bold')\ + .append(input_prompt, style='bold') + readline.set_startup_hook(lambda: readline.insert_text(original_text)) + try: + new_text = console.input(stylized_input_prompt) + finally: + readline.set_startup_hook() + update_config(selected_entry, new_text) + + +# pylint: disable=W0603 +def main(user_input: Optional[str], config: dict) -> int: + global selected_plugins + backend = config.get('backend') + mode = str(config.get('query_mode')) + service: Optional[llm_service.LLMService] = None + if backend == 'framework': + service = framework_api.Framework( + url=config.get('framework_url'), + api_key=config.get('framework_api_key'), + debug_mode=config.get('debug_mode', False) + ) + service.update_session_id() # get "ECSESSION" cookie + service.create_new_conversation() # get conversation_id from backend + if mode == 'flow': # get plugin list from current backend + plugins: list[framework_api.PluginData] = service.get_plugins() + if not plugins: + print(f'\033[1;31m{i18n.main_service_framework_plugin_is_none}\033[0m') + return 1 + selected_plugins = [interact.select_one_plugin(plugins)] + elif backend == 'spark': + service = spark_api.Spark( + app_id=config.get('spark_app_id'), + api_key=config.get('spark_api_key'), + api_secret=config.get('spark_api_secret'), + spark_url=config.get('spark_url'), + domain=config.get('spark_domain') + ) + elif backend == 'openai': + service = openai_api.ChatOpenAI( + url=str(config.get('model_url')), + api_key=config.get('model_api_key'), + 
model=config.get('model_name') + ) + + if service is None: + print(f'\033[1;31m{i18n.main_service_is_none}\033[0m') + return 1 + + print(f'\033[33m{i18n.main_exit_prompt}\033[0m') + + try: + while True: + if user_input is None: + user_input = input('\033[35m❯\033[0m ') + if user_input.lower().startswith('exit'): + return 0 + exit_code = handle_user_input(service, user_input, mode) + if exit_code != -1: + return exit_code + user_input = None # Reset user_input for next iteration (only if continuing service) + except KeyboardInterrupt: + if isinstance(service, framework_api.Framework): + service.stop() + print() + return 0 diff --git a/src/copilot/app/copilot_cli.py b/src/copilot/app/copilot_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..caac80d3695c6d96feae5c5dfacef03245b40722 --- /dev/null +++ b/src/copilot/app/copilot_cli.py @@ -0,0 +1,163 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + +# pylint: disable=R0911,R0912,R0913 + +import os +import sys +from typing import Optional + +import typer + +from copilot.app.copilot_app import edit_config, main +from copilot.app.copilot_init import setup_copilot +from copilot.utilities.config_manager import ( + CONFIG_PATH, + DEFAULT_CONFIG, + QUERY_MODE_NAME, + load_config, + select_backend, + select_query_mode, +) +from copilot.utilities.i18n import ( + BRAND_NAME, + cli_help_panel_advanced_options, + cli_help_panel_switch_mode, + cli_help_prompt_edit_settings, + cli_help_prompt_init_settings, + cli_help_prompt_question, + cli_help_prompt_select_backend, + cli_help_prompt_switch_mode, + cli_notif_compatibility, + cli_notif_no_config, + cli_notif_select_one_mode, +) + +CONFIG: dict = load_config() +BACKEND: str = CONFIG.get('backend', DEFAULT_CONFIG['backend']) +ADVANCED_MODE: bool = CONFIG.get('advanced_mode', DEFAULT_CONFIG['advanced_mode']) +DEBUG_MODE: bool = CONFIG.get('debug_mode', DEFAULT_CONFIG['debug_mode']) +CONFIG_INITIALIZED: bool = 
os.path.exists(CONFIG_PATH) + +app = typer.Typer( + context_settings={ + 'help_option_names': ['-h', '--help'], + 'allow_interspersed_args': True + }, + pretty_exceptions_show_locals=DEBUG_MODE, + add_completion=False +) + + +@app.command() +def cli( + question: Optional[str] = typer.Argument( + None, show_default=False, + help=cli_help_prompt_question), + chat: bool = typer.Option( + False, '--chat', '-c', + help=cli_help_prompt_switch_mode.format(mode=QUERY_MODE_NAME["chat"]), + rich_help_panel=cli_help_panel_switch_mode + ), + flow: bool = typer.Option( + False, '--plugin', '-p', + help=cli_help_prompt_switch_mode.format(mode=QUERY_MODE_NAME["flow"]), + rich_help_panel=cli_help_panel_switch_mode, + hidden=(BACKEND != 'framework'), + ), + diagnose: bool = typer.Option( + False, '--diagnose', '-d', + help=cli_help_prompt_switch_mode.format(mode=QUERY_MODE_NAME["diagnose"]), + rich_help_panel=cli_help_panel_switch_mode, + hidden=(BACKEND != 'framework') + ), + tuning: bool = typer.Option( + False, '--tuning', '-t', + help=cli_help_prompt_switch_mode.format(mode=QUERY_MODE_NAME["tuning"]), + rich_help_panel=cli_help_panel_switch_mode, + hidden=(BACKEND != 'framework') + ), + init: bool = typer.Option( + False, '--init', + help=cli_help_prompt_init_settings, + hidden=(CONFIG_INITIALIZED) + ), + backend: bool = typer.Option( + False, '--backend', + help=cli_help_prompt_select_backend, + rich_help_panel=cli_help_panel_advanced_options, + hidden=(not ADVANCED_MODE) + ), + settings: bool = typer.Option( + False, '--settings', + help=cli_help_prompt_edit_settings, + rich_help_panel=cli_help_panel_advanced_options, + hidden=(not ADVANCED_MODE) + ) +) -> int: + '''openEuler Copilot System CLI\n\nPress Ctrl+O to ask a question''' + if init: + setup_copilot() + return 0 + if not CONFIG_INITIALIZED: + print(f'\033[1;31m{cli_notif_no_config}\033[0m') + return 1 + if backend: + if ADVANCED_MODE: + select_backend() + return 0 + if settings: + if ADVANCED_MODE: + edit_config() + 
return 0 + + if sum(map(bool, [chat, flow, diagnose, tuning])) > 1: + print(f'\033[1;31m{cli_notif_select_one_mode}\033[0m') + return 1 + + if chat: + select_query_mode(0) + if not question: + return 0 + elif flow: + if BACKEND == 'framework': + select_query_mode(1) + if not question: + return 0 + else: + compatibility_notification(QUERY_MODE_NAME['flow']) + return 1 + elif diagnose: + if BACKEND == 'framework': + select_query_mode(2) + if not question: + return 0 + else: + compatibility_notification(QUERY_MODE_NAME['diagnose']) + return 1 + elif tuning: + if BACKEND == 'framework': + select_query_mode(3) + if not question: + return 0 + else: + compatibility_notification(QUERY_MODE_NAME['tuning']) + return 1 + + if question: + question = question.strip() + + return main(question, load_config()) + + +def compatibility_notification(mode: str): + print('\033[33m', cli_notif_compatibility.format(mode=mode, brand_name=BRAND_NAME), + '\033[0m', sep='') + + +def entry_point() -> int: + return app() + + +if __name__ == '__main__': + code = entry_point() + sys.exit(code) diff --git a/src/copilot/app/copilot_init.py b/src/copilot/app/copilot_init.py new file mode 100644 index 0000000000000000000000000000000000000000..655e8bf7aa89ff1ba8a842e19118593efe8f7e3f --- /dev/null +++ b/src/copilot/app/copilot_init.py @@ -0,0 +1,64 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ +# pylint: disable=W0611 + +import os +import readline # noqa: F401 + +from rich import print as rprint + +from copilot.utilities import config_manager, i18n + + +def setup_copilot(): + def _init_config(): + if not os.path.exists(config_manager.CONFIG_DIR): + os.makedirs(config_manager.CONFIG_DIR) + if not os.path.exists(config_manager.CONFIG_PATH): + config_manager.init_config() + + def _prompt_for_config(config_key: str, prompt_text: str) -> str: + config_value = input(prompt_text) + config_manager.update_config(config_key, config_value) + return config_value + + if not os.path.exists(config_manager.CONFIG_PATH): + _init_config() + + rprint(f'\n[bold]{i18n.settings_init_welcome_msg.format(brand_name=i18n.BRAND_NAME)}[/bold]\n') + rprint(i18n.settings_init_welcome_usage_guide + '\n') + rprint(i18n.settings_init_welcome_help_hint) + rprint(i18n.settings_init_welcome_docs_link.format(url=i18n.DOCS_URL) + '\n') + + config = config_manager.load_config() + if config.get('backend') == 'spark': + if config.get('spark_app_id') == '': + _prompt_for_config('spark_app_id', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_spark_app_id)) + if config.get('spark_api_key') == '': + _prompt_for_config('spark_api_key', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_spark_api_key)) + if config.get('spark_api_secret') == '': + _prompt_for_config('spark_api_secret', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_spark_api_secret)) + if config.get('backend') == 'framework': + framework_url = config.get('framework_url') + if framework_url == '': + framework_url = _prompt_for_config('framework_url', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_framework_url)) + if config.get('framework_api_key') == '': + title = i18n.settings_init_framework_api_key_notice_title.format(brand_name=i18n.BRAND_NAME) + rprint(f'[bold]{title}[/bold]') + 
rprint(i18n.settings_init_framework_api_key_notice_content.format(url=framework_url)) + _prompt_for_config('framework_api_key', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_framework_api_key.format(brand_name=i18n.BRAND_NAME))) + if config.get('backend') == 'openai': + if config.get('model_url') == '': + _prompt_for_config('model_url', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_model_url)) + if config.get('model_api_key') == '': + _prompt_for_config('model_api_key', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_model_api_key)) + if config.get('model_name') == '': + _prompt_for_config('model_name', i18n.interact_question_input_text.format( + question_body=i18n.settings_config_entry_model_name)) diff --git a/src/copilot/backends/framework_api.py b/src/copilot/backends/framework_api.py new file mode 100644 index 0000000000000000000000000000000000000000..c3210e69f6726f8bac73c86df970f1ebd16921af --- /dev/null +++ b/src/copilot/backends/framework_api.py @@ -0,0 +1,381 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
# File: src/copilot/backends/framework_api.py

import json
import re
import socket
import subprocess
from dataclasses import dataclass
from typing import Optional
from urllib.parse import urljoin

import requests
from rich.console import Console
from rich.live import Live
from rich.spinner import Spinner

from copilot.backends.llm_service import LLMService
from copilot.utilities.i18n import (
    BRAND_NAME,
    backend_framework_auth_invalid_api_key,
    backend_framework_request_connection_error,
    backend_framework_request_exceptions,
    backend_framework_request_timeout,
    backend_framework_request_too_many_requests,
    backend_framework_request_unauthorized,
    backend_framework_response_ended_prematurely,
    backend_framework_stream_error,
    backend_framework_stream_sensitive,
    backend_framework_stream_stop,
    backend_framework_stream_unknown,
    backend_framework_sugggestion,
    backend_general_request_failed,
    prompt_framework_extra_install,
    prompt_framework_keyword_install,
    prompt_framework_markdown_format,
    prompt_framework_plugin_ip,
)
from copilot.utilities.markdown_renderer import MarkdownRenderer
from copilot.utilities.shell_script import write_shell_script

# User-facing messages for non-200 answers of the streaming chat endpoint.
FRAMEWORK_LLM_STREAM_BAD_REQUEST_MSG = {
    401: backend_framework_request_unauthorized,
    429: backend_framework_request_too_many_requests
}


# pylint: disable=R0902
class Framework(LLMService):
    """LLM backend speaking to the openEuler Copilot System framework service.

    State is split in two lifetimes:
    * conversation-scoped: ``session_id``, ``plugins``, ``conversation_id``
    * answer-scoped: ``content``, ``commands``, ``sugggestion``
      (reset by ``_clear_previous_data`` before every query)
    """

    def __init__(self, url, api_key, debug_mode=False):
        self.endpoint: str = url
        self.api_key: str = api_key
        self.debug_mode: bool = debug_mode
        # Conversation-scoped state
        self.session_id: str = ''
        self.plugins: list = []
        self.conversation_id: str = ''
        # Answer-scoped state (cleared before each query)
        self.content: str = ''
        self.commands: list = []
        # NOTE: the triple-'g' spelling is part of the public attribute surface
        # and matches the i18n key names; kept for backward compatibility.
        self.sugggestion: str = ''
        # Rich console used for all terminal rendering
        self.console = Console()

    def get_shell_commands(self, question: str) -> list:
        """Return shell commands answering ``question``."""
        query = self._add_framework_extra_prompt(question)
        if prompt_framework_keyword_install in question.lower():
            query = self._add_framework_software_install_prompt(query)
        self._query_llm_service(query)
        return self._commands_or_extracted()

    def explain_shell_command(self, cmd: str):
        """Stream an explanation of ``cmd``; the suggestion footer is suppressed."""
        query = self._gen_explain_cmd_prompt(cmd)
        self._query_llm_service(query, show_suggestion=False)

    def update_session_id(self):
        """Refresh ``self.session_id`` from the framework's session endpoint."""
        try:
            response = requests.post(
                urljoin(self.endpoint, 'api/client/session'),
                json={'session_id': self.session_id} if self.session_id else {},
                headers=self._get_headers(),
                timeout=30
            )
        except requests.exceptions.RequestException:
            self.console.print(backend_framework_request_exceptions.format(brand_name=BRAND_NAME))
            return
        if not self._response_ok(response):
            return
        self.session_id = response.json().get('result', {}).get('session_id', '')

    def create_new_conversation(self):
        """Start a fresh conversation and remember its id."""
        try:
            response = requests.post(
                urljoin(self.endpoint, 'api/client/conversation'),
                headers=self._get_headers(),
                timeout=30
            )
        except requests.exceptions.RequestException:
            self.console.print(backend_framework_request_exceptions.format(brand_name=BRAND_NAME))
            return
        if not self._response_ok(response):
            return
        self.conversation_id = response.json().get('result', {}).get('conversation_id', '')

    def get_plugins(self) -> list:
        """Fetch the plugin list from the service; caches it in ``self.plugins``."""
        try:
            response = requests.get(
                urljoin(self.endpoint, 'api/client/plugin'),
                headers=self._get_headers(),
                timeout=30
            )
        except requests.exceptions.RequestException:
            self.console.print(backend_framework_request_exceptions.format(brand_name=BRAND_NAME))
            return []
        if not self._response_ok(response):
            return []
        self.session_id = self._reset_session_from_cookie(response.headers.get('set-cookie', ''))
        plugins = response.json().get('result', [])
        if plugins:
            self.plugins = [PluginData(**plugin) for plugin in plugins]
        return self.plugins

    def flow(self, question: str, plugins: list) -> list:
        """Run ``question`` through the user-selected ``plugins``."""
        self._query_llm_service(question, user_selected_plugins=plugins)
        return self._commands_or_extracted()

    def diagnose(self, question: str) -> list:
        """Run the RCA (diagnosis) plugin; defaults to the local host's IP."""
        question = self._ensure_question_has_ip(question)
        self._query_llm_service(question, user_selected_plugins=['euler-copilot-rca'])
        return self._commands_or_extracted()

    def tuning(self, question: str) -> list:
        """Run the tuning plugin; defaults to the local host's IP."""
        question = self._ensure_question_has_ip(question)
        self._query_llm_service(question, user_selected_plugins=['euler-copilot-tune'])
        return self._commands_or_extracted()

    def stop(self):
        """Ask the service to stop generating the current answer (best effort)."""
        try:
            response = requests.post(
                urljoin(self.endpoint, 'api/client/stop'),
                headers=self._get_headers(),
                timeout=30
            )
        except requests.exceptions.RequestException:
            return  # best effort: stay silent on network problems
        if response.status_code == 200:
            self.console.print(backend_framework_stream_stop.format(brand_name=BRAND_NAME))

    # pylint: disable=W0221
    def _query_llm_service(
        self,
        question: str,
        user_selected_plugins: Optional[list] = None,
        show_suggestion: bool = True
    ):
        """Send ``question`` to the chat endpoint and stream the answer."""
        if not user_selected_plugins:
            user_selected_plugins = []
        # NOTE(review): headers are captured *before* update_session_id(), so the
        # Cookie may carry the pre-refresh session while the body carries the
        # fresh id — original ordering preserved; confirm it is intentional.
        headers = self._get_headers()
        self.update_session_id()
        data = {
            'session_id': self.session_id,
            'question': question,
            'language': 'zh',
            'conversation_id': self.conversation_id,
            'user_selected_plugins': user_selected_plugins
        }
        self._stream_response(headers, data, show_suggestion)

    def _stream_response(self, headers, data, show_suggestion: bool = True):
        """POST the chat request and render the SSE stream into the console."""
        self._clear_previous_data()
        spinner = Spinner('material')
        with Live(console=self.console) as live:
            live.update(spinner, refresh=True)
            try:
                response = requests.post(
                    urljoin(self.endpoint, 'api/client/chat'),
                    headers=headers,
                    json=data,
                    stream=True,
                    timeout=300
                )
            except requests.exceptions.ConnectionError:
                live.update(
                    backend_framework_request_connection_error.format(brand_name=BRAND_NAME), refresh=True)
                return
            except requests.exceptions.Timeout:
                live.update(
                    backend_framework_request_timeout.format(brand_name=BRAND_NAME), refresh=True)
                return
            except requests.exceptions.RequestException:
                live.update(
                    backend_framework_request_exceptions.format(brand_name=BRAND_NAME), refresh=True)
                return
            if response.status_code != 200:
                msg = FRAMEWORK_LLM_STREAM_BAD_REQUEST_MSG.get(
                    response.status_code,
                    backend_general_request_failed.format(code=response.status_code)
                )
                live.update(msg, refresh=True)
                return
            self.session_id = self._reset_session_from_cookie(response.headers.get('set-cookie', ''))
            try:
                self._handle_response_stream(live, response, show_suggestion)
            except requests.exceptions.ChunkedEncodingError:
                live.update(backend_framework_response_ended_prematurely, refresh=True)

    def _clear_previous_data(self):
        """Reset answer-scoped state before a new query."""
        self.content = ''
        self.commands = []
        self.sugggestion = ''

    def _handle_response_stream(
        self,
        live: Live,
        response: requests.Response,
        show_suggestion: bool
    ):
        """Consume the SSE stream line by line.

        JSON payloads are folded into the answer; the sentinels ``[ERROR]``,
        ``[SENSITIVE]`` and ``[DONE]`` terminate the stream.
        """
        for line in response.iter_lines():
            if not line:
                continue  # SSE keep-alive blank line
            # BUGFIX: str.strip('data: ') strips a *character set* from both
            # ends; removeprefix() drops exactly the 'data: ' SSE prefix.
            content = line.decode('utf-8').removeprefix('data: ')
            try:
                jcontent = json.loads(content)
            except json.JSONDecodeError:
                if content == '':
                    continue
                if content == '[ERROR]':
                    if not self.content:
                        MarkdownRenderer.update(
                            live,
                            backend_framework_stream_error.format(brand_name=BRAND_NAME)
                        )
                elif content == '[SENSITIVE]':
                    MarkdownRenderer.update(live, backend_framework_stream_sensitive)
                    self.content = ''
                elif content != '[DONE]':
                    if not self.debug_mode:
                        continue  # silently ignore unknown frames unless debugging
                    MarkdownRenderer.update(
                        live,
                        backend_framework_stream_unknown.format(
                            brand_name=BRAND_NAME,
                            content=content
                        )
                    )
                break  # any recognized sentinel ends the stream
            else:
                self._handle_json_chunk(jcontent, live, show_suggestion)

    def _handle_json_chunk(self, jcontent, live: Live, show_suggestion: bool):
        """Fold one JSON frame into the answer state and refresh the screen."""
        self.content += jcontent.get('content', '')
        if show_suggestion:
            self._update_suggestion(jcontent.get('search_suggestions', []))
        if not self._collect_plugin_output(jcontent):
            return  # malformed plugin payload: keep the screen as-is
        self._refresh_live(live)

    def _update_suggestion(self, suggestions: list):
        """Remember the first follow-up question suggested by the service."""
        if not suggestions:
            return
        suggested_plugin = suggestions[0].get('name', '')
        suggested_question = suggestions[0].get('question', '')
        if suggested_plugin and suggested_question:
            self.sugggestion = f'**{suggested_plugin}** {suggested_question}'
        elif suggested_question:
            self.sugggestion = suggested_question

    def _collect_plugin_output(self, jcontent) -> bool:
        """Extract plugin results: Markdown report, shell command, or script.

        Returns False only when a string payload is not valid JSON, in which
        case the caller must skip the screen refresh (original behavior).
        """
        if jcontent.get('type', '') != 'extract':
            return True
        data = jcontent.get('data', '')
        if not data:
            return True
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return False
        output = data.get('output', '')
        if output:
            self.content = output  # plugin returned a full Markdown report
        cmd = data.get('shell', '')
        if cmd:
            self.commands.append(cmd)  # single-line shell command
        script = data.get('script', '')
        if script:
            self.commands.append(write_shell_script(script))  # multi-line script
        return True

    def _refresh_live(self, live: Live):
        """Re-render the current answer (and the optional suggestion footer)."""
        if not self.sugggestion:
            MarkdownRenderer.update(live, self.content)
        else:
            MarkdownRenderer.update(
                live,
                content=self.content,
                sugggestion=backend_framework_sugggestion.format(sugggestion=self.sugggestion),
            )

    def _commands_or_extracted(self) -> list:
        """Prefer commands reported by plugins; else parse Markdown code fences."""
        if self.commands:
            return self.commands
        return self._extract_shell_code_blocks(self.content)

    def _ensure_question_has_ip(self, question: str) -> str:
        """Prepend the local IP when the question names no target host."""
        if not self._contains_valid_ip(question):
            local_ip = self._get_local_ip()
            if local_ip:
                question = f'{prompt_framework_plugin_ip} {local_ip},' + question
        return question

    def _response_ok(self, response: requests.Response) -> bool:
        """True for HTTP 200; otherwise print a user-facing error and return False."""
        if response.status_code == 401:
            self.console.print(backend_framework_auth_invalid_api_key.format(brand_name=BRAND_NAME))
            return False
        if response.status_code != 200:
            self.console.print(backend_general_request_failed.format(code=response.status_code))
            return False
        return True

    def _get_headers(self) -> dict:
        """Common request headers; the session rides in the ECSESSION cookie."""
        return {
            'Accept': '*/*',
            'Content-Type': 'application/json; charset=UTF-8',
            'Connection': 'keep-alive',
            'Authorization': f'Bearer {self.api_key}',
            'Cookie': f'ECSESSION={self.session_id};' if self.session_id else '',
        }

    def _reset_session_from_cookie(self, cookie: str) -> str:
        """Pull the ECSESSION value out of a Set-Cookie header, or ''."""
        if not cookie:
            return ''
        for item in cookie.split(';'):
            item = item.strip()
            if item.startswith('ECSESSION'):
                # BUGFIX: split once only, so values containing '=' survive intact
                return item.split('=', 1)[1]
        return ''

    def _contains_valid_ip(self, text: str) -> bool:
        """True when ``text`` contains a dotted-quad IPv4 address.

        NOTE(review): the original pattern was lost in a bad merge (truncated
        to ``r'(?``); this reconstruction matches 0-255 octets that are not
        embedded in a longer dotted number — confirm against the original.
        """
        ip_pattern = re.compile(
            r'(?<![\d.])(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.){3}'
            r'(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?![\d.])'
        )
        return bool(ip_pattern.search(text))

    def _get_local_ip(self) -> str:
        """Best-effort local IP: try ``hostname -I``, fall back to DNS lookup."""
        try:
            process = subprocess.run(
                ['hostname', '-I'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True)
        except (FileNotFoundError, subprocess.CalledProcessError):
            try:
                ip_list = socket.gethostbyname_ex(socket.gethostname())[2]
            except socket.gaierror:
                return ''
            return ip_list[-1]
        if process.stdout:
            return process.stdout.decode('utf-8').strip().split(' ', maxsplit=1)[0]
        return ''

    def _add_framework_extra_prompt(self, query: str) -> str:
        """Append the Markdown-format instructions to ``query``."""
        return query + '\n\n' + prompt_framework_markdown_format

    def _add_framework_software_install_prompt(self, query: str) -> str:
        """Append package-manager / sudo guidance for install questions."""
        return query + '\n\n' + prompt_framework_extra_install.format(
            prompt_general_root=self._gen_sudo_prompt())


@dataclass
class PluginData:
    """One plugin as reported by ``api/client/plugin``."""
    id: str
    plugin_name: str
    plugin_description: str
    plugin_auth: Optional[dict] = None
# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
# File: src/copilot/backends/llm_service.py

import re
from abc import ABC, abstractmethod

from copilot.utilities.env_info import get_os_info, is_root
from copilot.utilities.i18n import (
    prompt_general_chat,
    prompt_general_explain_cmd,
    prompt_general_root_false,
    prompt_general_root_true,
    prompt_general_system,
)


class LLMService(ABC):
    """Common base for all LLM backends (framework, Spark, OpenAI-compatible)."""

    @abstractmethod
    def get_shell_commands(self, question: str) -> list:
        """Return a list of shell commands answering ``question``."""

    def explain_shell_command(self, cmd: str):
        """Stream an explanation of ``cmd`` to the terminal."""
        query = self._gen_explain_cmd_prompt(cmd)
        self._query_llm_service(query)

    @abstractmethod
    def _query_llm_service(self, question: str, *args, **kwargs):
        """Send ``question`` to the backend and render the streamed answer."""

    def _extract_shell_code_blocks(self, markdown_text) -> list:
        """Extract deduplicated shell commands from Markdown code fences."""
        pattern = r'```(bash|sh|shell)\n(.*?)(?=\n\s*```)'
        bash_blocks = re.findall(pattern, markdown_text, re.DOTALL | re.MULTILINE)
        # dict.fromkeys deduplicates while preserving first-seen order
        cmds = list(dict.fromkeys('\n'.join(block[1].strip() for block in bash_blocks).splitlines()))
        return [cmd for cmd in cmds if cmd and not cmd.startswith('#')]  # remove comments and empty lines

    def _get_context_length(self, context: list) -> int:
        """Total character count of a chat ``context`` (crude token proxy)."""
        return sum(len(message['content']) for message in context)

    def _gen_sudo_prompt(self) -> str:
        """Prompt fragment describing the current user's privilege level."""
        if is_root():
            return prompt_general_root_true
        return prompt_general_root_false

    def _gen_system_prompt(self) -> str:
        """System prompt parameterized with OS name and privilege guidance."""
        return prompt_general_system.format(
            os=get_os_info(), prompt_general_root=self._gen_sudo_prompt())

    def _gen_chat_prompt(self, question: str) -> str:
        """Chat prompt wrapping the user's question with OS context."""
        return prompt_general_chat.format(question=question, os=get_os_info())

    def _gen_explain_cmd_prompt(self, cmd: str) -> str:
        """Prompt asking the model to explain a shell command."""
        return prompt_general_explain_cmd.format(cmd=cmd)
# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
# File: src/copilot/backends/openai_api.py

import json
from typing import Optional

import requests
from rich.console import Console
from rich.live import Live
from rich.spinner import Spinner

from copilot.backends.llm_service import LLMService
from copilot.utilities.i18n import (
    backend_general_request_failed,
    backend_openai_request_connection_error,
    backend_openai_request_exceptions,
    backend_openai_request_timeout,
)
from copilot.utilities.markdown_renderer import MarkdownRenderer


class ChatOpenAI(LLMService):
    """LLM backend for any OpenAI-compatible chat-completions endpoint."""

    def __init__(self, url: str, api_key: Optional[str], model: Optional[str], max_tokens: int = 2048):
        self.url: str = url
        self.api_key: Optional[str] = api_key
        self.model: Optional[str] = model
        self.max_tokens: int = max_tokens
        self.answer: str = ''
        self.history: list = []
        # Rich console used for terminal rendering
        self.console = Console()

    def get_shell_commands(self, question: str) -> list:
        """Return shell commands extracted from the model's answer."""
        query = self._gen_chat_prompt(question)
        self._query_llm_service(query)
        return self._extract_shell_code_blocks(self.answer)

    # pylint: disable=W0221
    def _query_llm_service(self, question: str):
        self._stream_response(question)

    def _check_len(self, context: list) -> list:
        """Trim oldest messages until the context fits the token budget."""
        while self._get_context_length(context) > self.max_tokens / 2:
            del context[0]
        return context

    def _gen_params(self, query: str, stream: bool = True):
        """Build the request payload from the last few history turns.

        BUGFIX: operate on a *copy* of the history slice. The original code
        passed ``self.history`` itself whenever fewer than 5 turns were
        stored, so the system prompt was inserted into the persistent history
        and one extra system message accumulated (and was re-sent) per call.
        """
        self.history.append({'content': query, 'role': 'user'})
        messages = self._check_len(self.history[-5:])  # slice always copies
        messages.insert(0, {'content': self._gen_system_prompt(), 'role': 'system'})
        return {
            'messages': messages,
            'model': self.model,
            'stream': stream,
            'max_tokens': self.max_tokens,
            'temperature': 0.1,
            'top_p': 0.95
        }

    def _gen_headers(self):
        """Standard OpenAI-style bearer-token headers."""
        return {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}'
        }

    def _stream_response(self, query: str):
        """POST the chat request and render the streamed tokens live."""
        spinner = Spinner('material')
        self.answer = ''
        with Live(console=self.console) as live:
            live.update(spinner, refresh=True)
            try:
                response = requests.post(
                    self.url,
                    headers=self._gen_headers(),
                    data=json.dumps(self._gen_params(query)),
                    stream=True,
                    timeout=60
                )
            except requests.exceptions.ConnectionError:
                live.update(backend_openai_request_connection_error, refresh=True)
                return
            except requests.exceptions.Timeout:
                live.update(backend_openai_request_timeout, refresh=True)
                return
            except requests.exceptions.RequestException:
                live.update(backend_openai_request_exceptions, refresh=True)
                return
            if response.status_code != 200:
                live.update(backend_general_request_failed.format(code=response.status_code), refresh=True)
                return
            for line in response.iter_lines():
                if not line:
                    continue  # SSE keep-alive blank line
                # BUGFIX: removeprefix drops exactly 'data: '; str.strip('data: ')
                # strips any of the characters 'dat :e' from *both* ends.
                content = line.decode('utf-8').removeprefix('data: ')
                try:
                    jcontent = json.loads(content)
                except json.JSONDecodeError:
                    continue  # ignores '[DONE]' and any other non-JSON frame
                choices = jcontent.get('choices', [])
                if not choices:
                    continue
                delta = choices[0].get('delta', {})
                # 'or ""' guards servers that send "content": null in the final delta
                self.answer += delta.get('content') or ''
                MarkdownRenderer.update(live, self.answer)
                if choices[0].get('finish_reason') == 'stop':
                    self.history.append({'content': self.answer, 'role': 'assistant'})
                    break
# File: src/copilot/backends/spark_api.py

import asyncio
import base64
import hashlib
import hmac
import json
from datetime import datetime
from time import mktime
from urllib.parse import urlencode, urlparse
from wsgiref.handlers import format_date_time

import websockets
from rich.console import Console
from rich.live import Live
from rich.spinner import Spinner
from rich.text import Text

from copilot.backends.llm_service import LLMService
from copilot.utilities.i18n import (
    backend_spark_network_error,
    backend_spark_stream_error,
    backend_spark_websockets_exceptions_msg_a,
    backend_spark_websockets_exceptions_msg_b,
    backend_spark_websockets_exceptions_msg_c,
    backend_spark_websockets_exceptions_msg_title,
)
from copilot.utilities.markdown_renderer import MarkdownRenderer


# pylint: disable=R0902
class Spark(LLMService):
    """LLM backend for iFlytek Spark over its HMAC-signed WebSocket API."""

    # pylint: disable=R0913
    def __init__(self, app_id, api_key, api_secret, spark_url, domain, max_tokens=4096):
        self.app_id: str = app_id
        self.api_key: str = api_key
        self.api_secret: str = api_secret
        self.spark_url: str = spark_url
        self.host = urlparse(spark_url).netloc
        self.path = urlparse(spark_url).path
        self.domain: str = domain
        self.max_tokens: int = max_tokens
        self.answer: str = ''
        self.history: list = []
        # Rich console used for terminal rendering
        self.console = Console()

    def get_shell_commands(self, question: str) -> list:
        """Return shell commands extracted from the model's answer."""
        query = self._gen_chat_prompt(question)
        self._query_llm_service(query)
        return self._extract_shell_code_blocks(self.answer)

    # pylint: disable=W0221
    def _query_llm_service(self, question: str):
        # asyncio.run() replaces get_event_loop().run_until_complete(),
        # which is deprecated since Python 3.10.
        asyncio.run(self._query_spark_ai(question))

    async def _query_spark_ai(self, query: str):
        """Open the signed WebSocket, send the request, render the stream."""
        url = self._create_url()
        self.answer = ''
        spinner = Spinner('material')
        with Live(console=self.console) as live:
            live.update(spinner, refresh=True)
            try:
                async with websockets.connect(url) as websocket:
                    await websocket.send(json.dumps(self._gen_params(query)))
                    while True:
                        try:
                            message = await websocket.recv()
                            data = json.loads(message)
                            code = data['header']['code']
                            if code != 0:
                                live.update(backend_spark_stream_error.format(
                                    code=code,
                                    message=data['header']['message']
                                ), refresh=True)
                                await websocket.close()
                            else:
                                choices = data['payload']['choices']
                                status = choices['status']
                                self.answer += choices['text'][0]['content']
                                MarkdownRenderer.update(live, self.answer)
                                if status == 2:  # status 2 marks the final frame
                                    self.history.append({'role': 'assistant', 'content': self.answer})
                                    break
                        except websockets.exceptions.ConnectionClosed:
                            break

            except websockets.exceptions.InvalidStatusCode:
                live.update(
                    Text.from_ansi(f'\033[1;31m{backend_spark_websockets_exceptions_msg_title}\033[0m\n\n')
                    .append(backend_spark_websockets_exceptions_msg_a)
                    .append(backend_spark_websockets_exceptions_msg_b)
                    .append(backend_spark_websockets_exceptions_msg_c.format(spark_url=self.spark_url)),
                    refresh=True
                )
            except Exception:  # pylint: disable=W0718
                # Broad on purpose: surface any network failure as one message.
                # BUGFIX: refresh=True was missing here, leaving the spinner on screen.
                live.update(backend_spark_network_error, refresh=True)

    def _create_url(self):
        """Build the WebSocket URL with RFC1123 date and HMAC-SHA256 signature."""
        now = datetime.now()  # RFC1123-formatted timestamp
        date = format_date_time(mktime(now.timetuple()))

        signature_origin = f'host: {self.host}\ndate: {date}\nGET {self.path} HTTP/1.1'

        # Sign the canonical request with HMAC-SHA256
        signature_sha = hmac.new(self.api_secret.encode('utf-8'),
                                 signature_origin.encode('utf-8'),
                                 digestmod=hashlib.sha256).digest()
        signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8')

        authorization_origin = f'api_key="{self.api_key}", algorithm="hmac-sha256", ' + \
            f'headers="host date request-line", signature="{signature_sha_base64}"'
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')

        # Authentication parameters appended to the base websocket URL
        auth_params = {
            'authorization': authorization,
            'date': date,
            'host': self.host
        }
        return self.spark_url + '?' + urlencode(auth_params)

    def _check_len(self, context: list) -> list:
        """Trim oldest messages until the context fits the token budget."""
        while self._get_context_length(context) > self.max_tokens / 2:
            del context[0]
        return context

    def _gen_params(self, query: str):
        """Build the request payload from the app id and recent history.

        BUGFIX: operate on a *copy* of the history slice. The original passed
        ``self.history`` itself whenever fewer than 5 turns were stored, so for
        the generalv3.5 domain the system prompt leaked into the persistent
        history and accumulated across calls.
        """
        self.history.append({'role': 'user', 'content': query})
        messages = self._check_len(self.history[-5:])  # slice always copies
        if self.domain == 'generalv3.5':
            # Only generalv3.5 accepts a system-role message
            messages.insert(0, {'role': 'system', 'content': self._gen_system_prompt()})
        return {
            'header': {
                'app_id': self.app_id,
                # NOTE(review): hard-coded uid — confirm whether per-user ids are required
                'uid': '1234',
            },
            'parameter': {
                'chat': {
                    'domain': self.domain,
                    'temperature': 0.5,
                    'max_tokens': self.max_tokens,
                    'auditing': 'default',
                }
            },
            'payload': {
                'message': {
                    'text': messages
                }
            }
        }
# File: src/copilot/utilities/config_manager.py

import json
import os

from copilot.utilities import i18n, interact

CONFIG_DIR = os.path.join(os.path.expanduser('~'), '.config/eulercopilot')
CONFIG_PATH = os.path.join(CONFIG_DIR, 'config.json')

# Display names for each config entry; keys must match DEFAULT_CONFIG.
CONFIG_ENTRY_NAME = {
    'backend': i18n.settings_config_entry_backend,
    'query_mode': i18n.settings_config_entry_query_mode,
    'advanced_mode': i18n.settings_config_entry_advanced_mode,
    'debug_mode': i18n.settings_config_entry_debug_mode,
    'spark_app_id': i18n.settings_config_entry_spark_app_id,
    'spark_api_key': i18n.settings_config_entry_spark_api_key,
    'spark_api_secret': i18n.settings_config_entry_spark_api_secret,
    'spark_url': i18n.settings_config_entry_spark_url,
    'spark_domain': i18n.settings_config_entry_spark_domain,
    'framework_url': i18n.settings_config_entry_framework_url.format(brand_name=i18n.BRAND_NAME),
    'framework_api_key': i18n.settings_config_entry_framework_api_key.format(brand_name=i18n.BRAND_NAME),
    'model_url': i18n.settings_config_entry_model_url,
    'model_api_key': i18n.settings_config_entry_model_api_key,
    'model_name': i18n.settings_config_entry_model_name
}

# Display names for the supported LLM backends.
BACKEND_NAME = {
    'framework': i18n.interact_backend_framework.format(brand_name=i18n.BRAND_NAME),
    'spark': i18n.interact_backend_spark,
    'openai': i18n.interact_backend_openai
}

# Display names for the query (Q&A) modes, in selection order.
QUERY_MODE_NAME = {
    'chat': i18n.query_mode_chat,
    'flow': i18n.query_mode_flow,
    'diagnose': i18n.query_mode_diagnose,
    'tuning': i18n.query_mode_tuning,
}

DEFAULT_CONFIG = {
    'backend': 'framework',
    'query_mode': 'chat',
    'advanced_mode': False,
    'debug_mode': False,
    'spark_app_id': '',
    'spark_api_key': '',
    'spark_api_secret': '',
    'spark_url': 'wss://spark-api.xf-yun.com/v3.5/chat',
    'spark_domain': 'generalv3.5',
    'framework_url': 'https://eulercopilot.gitee.com',
    'framework_api_key': '',
    'model_url': '',
    'model_api_key': '',
    'model_name': ''
}


def load_config() -> dict:
    """Load the user config, creating a default one on first run."""
    try:
        with open(CONFIG_PATH, 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        # First run: materialize the defaults, then hand back a copy
        # (avoids the original's re-read recursion).
        init_config()
        return dict(DEFAULT_CONFIG)


def write_config(config: dict):
    """Persist ``config`` as pretty-printed JSON with a trailing newline."""
    with open(CONFIG_PATH, 'w', encoding='utf-8') as json_file:
        json.dump(config, json_file, indent=4)
        json_file.write('\n')  # trailing newline


def init_config():
    """Create the config directory and write the default configuration."""
    # BUGFIX: exist_ok=True removes the exists()/makedirs() race.
    os.makedirs(CONFIG_DIR, exist_ok=True)
    write_config(DEFAULT_CONFIG)


def update_config(key: str, value):
    """Set one known config ``key`` to ``value``; unknown keys are ignored."""
    if key not in DEFAULT_CONFIG:
        return
    config = load_config()
    config[key] = value
    write_config(config)


def select_query_mode(mode: int):
    """Persist the query mode chosen by its positional index."""
    modes = list(QUERY_MODE_NAME)
    # BUGFIX: also reject negative indices, which would silently wrap around.
    if 0 <= mode < len(modes):
        update_config('query_mode', modes[mode])


def select_backend():
    """Interactively pick an LLM backend and persist the choice."""
    backend = interact.select_backend()
    if backend in BACKEND_NAME:  # 'cancel' (or None) falls through
        update_config('backend', backend)


def config_to_markdown() -> str:
    """Render the current configuration as a Markdown table."""
    config = load_config()
    config_table = '\n'.join(
        f'| {CONFIG_ENTRY_NAME.get(key)} | {__get_config_item_display_name(key, value)} |'
        for key, value in config.items()
    )
    return f'# {i18n.settings_markdown_title}\n\
| {i18n.settings_markdown_header_key} \
| {i18n.settings_markdown_header_value} |\n\
| ----------- | ----------- |\n{config_table}'


def __get_config_item_display_name(key, value):
    """Map stored values to human-readable names where a mapping exists."""
    if key == 'backend':
        return BACKEND_NAME.get(value, value)
    if key == 'query_mode':
        return QUERY_MODE_NAME.get(value, value)
    return value
# File: src/copilot/utilities/env_info.py

import os
import platform
import re
import subprocess
import sys
from typing import Optional


def _exec_shell_cmd(cmd: list) -> Optional[subprocess.CompletedProcess]:
    """Run ``cmd`` with captured output; return None (reporting to stderr) on failure."""
    try:
        return subprocess.run(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=True
        )
    except subprocess.CalledProcessError as e:
        sys.stderr.write(e.stderr)
        return None
    except FileNotFoundError as e:
        sys.stderr.write(str(e))
        return None


def _proc_linux_info(shell_result: Optional[subprocess.CompletedProcess]) -> str:
    """Extract PRETTY_NAME from /etc/os-release output, with a fallback."""
    if shell_result is not None:
        match = re.search(r'PRETTY_NAME="(.+?)"', shell_result.stdout)
        if match:
            return match.group(1)  # the value inside the quotes
    return 'Unknown Linux distribution'


def _proc_macos_info(shell_result: Optional[subprocess.CompletedProcess]) -> str:
    """Extract 'ProductName ProductVersion' from ``sw_vers`` output.

    BUGFIX: the original returned None (instead of the fallback string) when
    the command itself had failed, breaking get_os_info()'s ``-> str`` contract.
    """
    if shell_result is not None and shell_result.returncode == 0:
        macos_info = {}
        for line in shell_result.stdout.splitlines():
            # partition on the first ':' tolerates any amount of tab padding
            # (the original split(':\t\t') raised on single-tab output)
            key, _sep, value = line.partition(':')
            macos_info[key.strip()] = value.strip()
        product_name = macos_info.get('ProductName')
        product_version = macos_info.get('ProductVersion')
        if product_name is not None and product_version is not None:
            return f'{product_name} {product_version}'
    return 'Unknown macOS version'


def get_os_info() -> str:
    """Human-readable OS description, e.g. 'openEuler 24.03 LTS'."""
    system = platform.system()
    if system == 'Linux':
        return _proc_linux_info(_exec_shell_cmd(['cat', '/etc/os-release']))
    if system == 'Darwin':
        return _proc_macos_info(_exec_shell_cmd(['sw_vers']))
    return system


def is_root() -> bool:
    """True when running with effective uid 0 (POSIX only)."""
    return os.geteuid() == 0
"""User-facing strings and LLM prompt templates (zh_CN source text).

All values are wrapped in gettext's ``_()`` so they can be translated.
NOTE: the misspelled public names ``sugggestion`` / ``valiidate`` are kept —
they are imported by other modules and are part of the public surface.
"""

from gettext import gettext as _

BRAND_NAME = 'openEuler Copilot System'
DOCS_URL = _('https://gitee.com/openeuler/euler-copilot-framework/blob/master/docs/user-guide/README.md')

# --- main loop / command execution messages ---
main_exit_prompt = _('输入 "exit" 或按下 Ctrl+C 结束对话')
main_service_is_none = _('未正确配置 LLM 后端,请检查配置文件')
main_service_framework_plugin_is_none = _('获取插件失败或插件列表为空\n请联系管理员检查后端配置')
main_exec_builtin_cmd = _('不支持执行 Shell 内置命令 "{cmd_prefix}",请复制后手动执行')
main_exec_value_error = _('执行命令时出错:{error}')
main_exec_not_found_error = _('命令不存在:{error}')
main_exec_cmd_failed_with_exit_code = _('命令 "{cmd}" 执行中止,退出码:{exit_code}')

# --- CLI help texts and notifications ---
cli_help_prompt_question = _('通过自然语言提问')
cli_help_prompt_switch_mode = _('切换到{mode}模式')
cli_help_prompt_init_settings = _('初始化 copilot 设置')
cli_help_prompt_edit_settings = _('编辑 copilot 设置')
cli_help_prompt_select_backend = _('选择大语言模型后端')
cli_help_panel_switch_mode = _('选择问答模式')
cli_help_panel_advanced_options = _('高级选项')
cli_notif_select_one_mode = _('当前版本只能选择一种问答模式')
cli_notif_compatibility = _('当前大模型后端不支持{mode}功能\n\
推荐使用 {brand_name} 智能体框架')
cli_notif_no_config = _('请先初始化 copilot 设置\n\
请使用 "copilot --init" 命令初始化')

# --- interactive menu labels ---
interact_action_explain = _('解释命令')
interact_action_edit = _('编辑命令')
interact_action_execute = _('执行命令')
interact_action_explain_selected = _('解释指定命令')
interact_action_edit_selected = _('编辑指定命令')
interact_action_execute_selected = _('执行指定命令')
interact_action_execute_all = _('执行所有命令')
interact_backend_framework = _('{brand_name} 智能体')
interact_backend_spark = _('讯飞星火大模型')
interact_backend_openai = _('OpenAI 兼容模式')
interact_cancel = _('取消')

interact_question_yes_or_no = _('是否{question_body}:')
interact_question_input_text = _('请输入{question_body}:')
interact_question_select_action = _('选择要执行的操作:')
interact_question_select_cmd = _('选择命令:')
interact_question_select_settings_entry = _('选择设置项:')
interact_question_select_backend = _('请选择大模型后端:')
interact_question_select_query_mode = _('请选择问答模式:')
interact_question_select_plugin = _('请选择插件:')
interact_select_plugins_valiidate = _('请选择至少一个插件')

# --- backend error / status messages ---
backend_general_request_failed = _('请求失败: {code}')
backend_framework_auth_invalid_api_key = _('{brand_name} 智能体 API 密钥无效,请检查配置文件')
backend_framework_request_connection_error = _('{brand_name} 智能体连接失败,请检查网络连接')
backend_framework_request_timeout = _('{brand_name} 智能体请求超时,请检查网络连接')
backend_framework_request_exceptions = _('{brand_name} 智能体请求异常,请检查网络连接')
backend_framework_request_unauthorized = _('当前会话已过期,请退出后重试')
backend_framework_request_too_many_requests = _('请求过于频繁,请稍后再试')
backend_framework_response_ended_prematurely = _('响应异常中止,请检查网络连接')
backend_framework_stream_error = _('{brand_name} 智能体遇到错误,请联系管理员定位问题')
backend_framework_stream_unknown = _('{brand_name} 智能体返回了未知内容:\n```json\n{content}\n```')
backend_framework_stream_sensitive = _('检测到违规信息,请重新提问')
backend_framework_stream_stop = _('{brand_name} 智能体已停止生成内容')
backend_framework_sugggestion = _('**你可以继续问** {sugggestion}')
backend_spark_stream_error = _('请求错误: {code}\n{message}')
backend_spark_websockets_exceptions_msg_title = _('请求错误')
backend_spark_websockets_exceptions_msg_a = _('请检查 appid 和 api_key 是否正确,或检查网络连接是否正常。\n')
backend_spark_websockets_exceptions_msg_b = _('输入 "vi ~/.config/eulercopilot/config.json" 查看和编辑配置;\n')
backend_spark_websockets_exceptions_msg_c = _('或尝试 ping {spark_url}')
backend_spark_network_error = _('访问大模型失败,请检查网络连接')
backend_openai_request_connection_error = _('连接大模型失败')
backend_openai_request_timeout = _('请求大模型超时')
backend_openai_request_exceptions = _('请求大模型异常')

# --- settings UI strings ---
settings_markdown_title = _('当前配置')
settings_markdown_header_key = _('设置项')
settings_markdown_header_value = _('值')
settings_config_entry_backend = _('大模型后端')
settings_config_entry_query_mode = _('问答模式')
settings_config_entry_advanced_mode = _('启用高级模式')
settings_config_entry_debug_mode = _('启用调试模式')
settings_config_entry_spark_app_id = _('星火大模型 App ID')
settings_config_entry_spark_api_key = _('星火大模型 API Key')
settings_config_entry_spark_api_secret = _('星火大模型 API Secret')
settings_config_entry_spark_url = _('星火大模型 URL')
settings_config_entry_spark_domain = _('星火大模型领域')
settings_config_entry_framework_url = _('{brand_name} 智能体 URL')
settings_config_entry_framework_api_key = _('{brand_name} 智能体 API Key')
settings_config_entry_model_url = _('OpenAI 模型 URL')
settings_config_entry_model_api_key = _('OpenAI 模型 API Key')
settings_config_entry_model_name = _('OpenAI 模型名称')
settings_config_interact_query_mode_disabled_explain = _('当前后端无法使用{mode}模式')
settings_init_welcome_msg = _('欢迎使用 {brand_name} 智能体')
settings_init_welcome_usage_guide = _('使用方法:输入问题,按下 Ctrl+O 提问')
settings_init_welcome_help_hint = _('更多用法详见命令行帮助:"copilot --help"')
settings_init_welcome_docs_link = _('使用指南:{url}')
settings_init_framework_api_key_notice_title = _('获取 {brand_name} 智能体 API Key')
settings_init_framework_api_key_notice_content = _('请前往 {url},点击右上角头像图标获取 API Key')

# --- query mode display names ---
query_mode_chat = _('智能问答')
query_mode_flow = _('智能插件')
query_mode_diagnose = _('智能诊断')
query_mode_tuning = _('智能调优')

# --- LLM prompt templates ---
prompt_general_root_true = _('当前用户为 root 用户,你生成的 shell 命令不能包含 "sudo"')
prompt_general_root_false = _('当前用户为普通用户,若你生成的 shell 命令需要 root 权限,需要包含 "sudo"')
prompt_general_system = _('''你是操作系统 {os} 的运维助理,你精通当前操作系统的管理和运维,熟悉运维脚本的编写。
你给出的答案必须符合当前操作系统要求,你不能使用当前操作系统没有的功能。

格式要求:
你的回答必须使用 Markdown 格式,代码块和表格都必须用 Markdown 呈现;
你需要用中文回答问题,除了代码,其他内容都要符合汉语的规范。

用户可能问你一些操作系统相关的问题,你尤其需要注意安装软件包的情景:
openEuler 使用 dnf 或 yum 管理软件包,你不能在回答中使用 apt 或其他命令;
Debian 和 Ubuntu 使用 apt 管理软件包,你也不能在回答中使用 dnf 或 yum 命令;
你可能还会遇到使用其他类 unix 系统的情景,比如 macOS 要使用 Homebrew 安装软件包。

请特别注意当前用户的权限:
{prompt_general_root}

在给用户返回 shell 命令时,你必须返回安全的命令,不能进行任何危险操作!
如果涉及到删除文件、清理缓存、删除用户、卸载软件、wget下载文件等敏感操作,你必须生成安全的命令

危险操作举例:
+ 例1: 强制删除
  ```bash
  rm -rf /path/to/sth
  ```
+ 例2: 卸载软件包时默认同意
  ```bash
  dnf remove -y package_name
  ```
你不能输出类似于上述例子的命令!

由于用户使用命令行与你交互,你需要避免长篇大论,请使用简洁的语言,一般情况下你的回答不应超过1000字。
''')
prompt_general_chat = _('''根据用户输入的问题,使用 Markdown 格式输出。

用户的问题:
{question}

基本要求:
1. 如果涉及到生成 shell 命令,请用单行 shell 命令回答,不能使用多行 shell 命令
2. 如果涉及 shell 命令或代码,请用 Markdown 代码块输出,必须标明代码的语言
3. 如果用户要求你生成的命令涉及到数据输入,你需要正确处理数据输入的方式,包括用户交互
4. 当前操作系统是 {os},你的回答必须符合当前系统要求,不能使用当前系统没有的功能
''')
prompt_general_explain_cmd = _('''```bash
{cmd}
```
请解释上面的 Shell 命令

要求:
先在代码块中打印一次上述命令,再有条理地解释命令中的主要步骤
''')
prompt_framework_markdown_format = _('''格式要求:
+ 你的回答中的代码块和表格都必须用 Markdown 呈现;
+ 你需要用中文回答问题,除了代码,其他内容都要符合汉语的规范。
''')
prompt_framework_extra_install = _('''其他要求:
+ openEuler 使用 dnf 管理软件包,你不能在回答中使用 apt 或其他软件包管理器
+ {prompt_general_root}
''')
prompt_framework_keyword_install = _('安装')
prompt_framework_plugin_ip = _('当前机器的IP为')
+ +from typing import Optional + +import questionary + +from copilot.backends.framework_api import PluginData +from copilot.utilities import config_manager, i18n + +ACTIONS_SINGLE_CMD = [ + questionary.Choice( + i18n.interact_action_explain, + value='explain', + shortcut_key='a' + ), + questionary.Choice( + i18n.interact_action_edit, + value='edit', + shortcut_key='z' + ), + questionary.Choice( + i18n.interact_action_execute, + value='execute', + shortcut_key='x' + ), + questionary.Choice( + i18n.interact_cancel, + value='cancel', + shortcut_key='c' + ) +] + +ACTIONS_MULTI_CMDS = [ + questionary.Choice( + i18n.interact_action_explain_selected, + value='explain', + shortcut_key='a' + ), + questionary.Choice( + i18n.interact_action_edit_selected, + value='edit', + shortcut_key='z' + ), + questionary.Choice( + i18n.interact_action_execute_all, + value='execute_all', + shortcut_key='x' + ), + questionary.Choice( + i18n.interact_action_execute_selected, + value='execute_selected', + shortcut_key='s' + ), + questionary.Choice( + i18n.interact_cancel, + value='cancel', + shortcut_key='c' + ) +] + +BACKEND_CHOICES = [ + questionary.Choice( + i18n.interact_backend_framework.format(brand_name=i18n.BRAND_NAME), + value='framework', + shortcut_key='e' + ), + questionary.Choice( + i18n.interact_backend_spark, + value='spark', + shortcut_key='s' + ), + questionary.Choice( + i18n.interact_backend_openai, + value='openai', + shortcut_key='o' + ), + questionary.Choice( + i18n.interact_cancel, + value='cancel', + shortcut_key='c' + ) +] + +CUSTOM_STYLE_FANCY = questionary.Style( + [ + ('separator', 'fg:#00afff'), + ('qmark', 'fg:#005f87 bold'), + ('question', 'bold'), + ('selected', 'fg:#00afff bold'), + ('pointer', 'fg:#005f87 bold'), + ('highlighted', 'bold'), + ('answer', 'fg:#00afff bold'), + ('text', 'fg:#808080'), + ('disabled', 'fg:#808080 italic'), + ] +) + + +def select_backend() -> str: + return questionary.select( + i18n.interact_question_select_backend, + 
choices=BACKEND_CHOICES,
+        qmark='❯',
+        use_shortcuts=True,
+        style=CUSTOM_STYLE_FANCY,
+    ).ask()
+
+
+def select_action(has_multi_cmds: bool) -> str:
+    return questionary.select(
+        i18n.interact_question_select_action,
+        choices=ACTIONS_MULTI_CMDS if has_multi_cmds else ACTIONS_SINGLE_CMD,
+        qmark='❯',
+        use_shortcuts=True,
+        style=CUSTOM_STYLE_FANCY
+    ).ask()
+
+
+def select_command(commands: list) -> str:
+    return questionary.select(
+        i18n.interact_question_select_cmd,
+        choices=commands,
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY
+    ).ask()
+
+
+def select_command_with_index(commands: list) -> int:
+    command = questionary.select(
+        i18n.interact_question_select_cmd,
+        choices=commands,
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY
+    ).ask()
+    return commands.index(command)  # NOTE(review): ask() returns None on cancel (e.g. Ctrl-C); commands.index(None) then raises ValueError — confirm callers handle this
+
+
+def select_multiple_commands(commands: list) -> list:
+    return questionary.checkbox(
+        i18n.interact_question_select_cmd,
+        choices=commands,
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY
+    ).ask()
+
+
+def select_one_plugin(plugins: list[PluginData]) -> str:
+    return questionary.select(
+        i18n.interact_question_select_plugin,
+        choices=__get_plugin_choices(plugins),
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY
+    ).ask()
+
+
+def select_settings_entry() -> str:
+    return questionary.select(
+        i18n.interact_question_select_settings_entry,
+        choices=__get_settings_entry_choices(),
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY,
+    ).ask()
+
+
+def select_query_mode(backend: str) -> str:
+    return questionary.select(
+        i18n.interact_question_select_query_mode,
+        choices=__get_query_mode_choices(backend),
+        qmark='❯',
+        style=CUSTOM_STYLE_FANCY,
+    ).ask()
+
+
+def ask_boolean(question: str) -> bool:
+    return questionary.confirm(question, default=False, style=CUSTOM_STYLE_FANCY).ask()
+
+
+def __get_plugin_choices(plugins: list[PluginData]) -> list:
+    return [
+        questionary.Choice(
+            plugin.plugin_name,
+            value=plugin.id
+        ) for plugin in plugins
+    ]
+
+
+def __get_settings_entry_choices() -> list:
+    choices = 
[questionary.Choice(name, item) for item, name in config_manager.CONFIG_ENTRY_NAME.items()]
+    choices.append(questionary.Choice(i18n.interact_cancel, value='cancel'))
+    return choices
+
+
+def __get_query_mode_choices(backend: str) -> list:
+    def __disabled(name: str, item: str) -> Optional[str]:
+        return (
+            i18n.settings_config_interact_query_mode_disabled_explain.format(mode=name)
+            if backend != 'framework' and item != 'chat'
+            else None
+        )
+
+    return [
+        questionary.Choice(
+            name,
+            item,
+            disabled=__disabled(name, item)
+        ) for item, name in config_manager.QUERY_MODE_NAME.items()
+    ]
diff --git a/src/copilot/utilities/markdown_renderer.py b/src/copilot/utilities/markdown_renderer.py
new file mode 100644
index 0000000000000000000000000000000000000000..261c3b6e37fe8e68b3227e54ad3a720d957b5275
--- /dev/null
+++ b/src/copilot/utilities/markdown_renderer.py
@@ -0,0 +1,21 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+
+from rich.console import Group
+from rich.live import Live
+from rich.markdown import Markdown
+from rich.panel import Panel
+
+
+class MarkdownRenderer:
+
+    @staticmethod
+    def update(live: Live, content: str, suggestion: str = '', refresh: bool = True):
+        content_panel = Panel(Markdown(content, code_theme='github-dark'), border_style='gray50')
+        if not suggestion:
+            live.update(content_panel, refresh=refresh)
+            return
+        suggestion_panel = Panel(Markdown(suggestion, code_theme='github-dark'), border_style='gray50')
+        live.update(
+            Group(content_panel, suggestion_panel),
+            refresh=refresh
+        )
diff --git a/src/copilot/utilities/shell_script.py b/src/copilot/utilities/shell_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..61f6debceef3ee215f601d23be89eaa0ff3d8cf9
--- /dev/null
+++ b/src/copilot/utilities/shell_script.py
@@ -0,0 +1,15 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+
+import os
+import uuid
+
+
+def write_shell_script(content: str) -> str:
+    '''Write the script content to a .sh file and return the command that executes it.'''
+    script_name = f'plugin_gen_script_{str(uuid.uuid4())[:8]}.sh'
+    script_path = os.path.join(os.path.expanduser('~'), '.eulercopilot', 'scripts', script_name)
+    os.makedirs(os.path.dirname(script_path), exist_ok=True)
+    with open(script_path, 'w', encoding='utf-8') as script_file:
+        script_file.write(content)
+    os.chmod(script_path, 0o700)  # NOTE(review): file briefly exists with default perms before chmod — consider os.open(..., mode=0o700)
+    return f'bash {script_path}'
diff --git a/src/eulercopilot.sh b/src/eulercopilot.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4f10f29627ab85ef53def57b7f763819ca7ddfb3
--- /dev/null
+++ b/src/eulercopilot.sh
@@ -0,0 +1,103 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+
+read_query_mode() {
+    if [ ! -f ~/.config/eulercopilot/config.json ]; then
+        return
+    fi
+
+    local query_mode
+    query_mode=$(jq '.query_mode' ~/.config/eulercopilot/config.json)
+
+    if [ "$query_mode" = "\"chat\"" ]; then
+        echo "智能问答"
+    elif [ "$query_mode" = "\"flow\"" ]; then
+        echo "智能插件"
+    elif [ "$query_mode" = "\"diagnose\"" ]; then
+        echo "智能诊断"
+    elif [ "$query_mode" = "\"tuning\"" ]; then
+        echo "智能调优"
+    else
+        echo "未知模式"
+    fi
+}
+
+get_prompt() {
+    local username
+    local hostname
+    local current_base_dir
+    local prompt_end
+    local prompt
+
+    username=$(whoami)
+    hostname=$(hostname -s)
+    if [[ "$PWD" == "$HOME" ]]; then
+        current_base_dir='~'
+    else
+        current_base_dir=$(basename "$PWD")
+    fi
+    if [[ $EUID -eq 0 ]]; then
+        prompt_end='#'
+    else
+        prompt_end='$'
+    fi
+    prompt="${PS1//\\u/$username}"
+    prompt="${prompt//\\h/$hostname}"
+    prompt="${prompt//\\W/$current_base_dir}"
+    prompt="${prompt//\\$/$prompt_end}"
+    echo "${prompt}"
+}
+
+set_prompt() {
+    local query_mode
+    query_mode="$(read_query_mode)"
+
+    if [ -z "$query_mode" ]; then
+        return
+    fi
+
+    if [[ "$PS1" != *"\[\033[1;33m"* ]]; then
+        PS1="╭─ \[\033[1;33m\]${query_mode}\[\033[0m\] ─╮\n╰${PS1}"
+    fi
+}
+
+revert_prompt() 
{ + PS1="${PS1#*╰}" +} + +run_copilot() { + local terminal_settings + local readline="${READLINE_LINE}" + if [[ -z "${readline}" ]]; then + READLINE_LINE="copilot " + READLINE_POINT=${#READLINE_LINE} + elif [[ ! "${readline}" =~ ^copilot ]]; then + terminal_settings=$(stty -g) + READLINE_LINE="" + local _ps1 + local prompt + _ps1=$(get_prompt) + prompt=${_ps1#*\\n} + history -s "${readline}" && echo "${prompt}${readline}" + stty sane && (copilot "${readline}") + stty "${terminal_settings}" + if [[ $_ps1 =~ \\n ]]; then + prompt="${_ps1%%\\n*}" + prompt="${prompt//\\[/}" + prompt="${prompt//\\]/}" + echo -e "${prompt}" + fi + elif [[ "${readline}" == "copilot " ]]; then + READLINE_LINE="" + if [[ "$PS1" == *"\[\033[1;33m"* ]]; then + revert_prompt + printf "\033[1;31m已关闭 openEuler Copilot System 提示符\033[0m\n" + else + set_prompt + printf "\033[1;32m已开启 openEuler Copilot System 提示符\033[0m\n" + fi + fi +} + +bind -x '"\C-o": run_copilot' 2>/dev/null +alias set_copilot_prompt='set_prompt' +alias revert_copilot_prompt='revert_prompt' diff --git a/src/setup.py b/src/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..ab26b8afed6d58816ba17e48931ff65e027760b5 --- /dev/null +++ b/src/setup.py @@ -0,0 +1,73 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+
+import os
+
+from Cython.Build import cythonize
+from Cython.Distutils import build_ext
+from setuptools import setup
+from setuptools.extension import Extension
+
+
+def add_py_files(module_name):
+    return [
+        os.path.join(module_name, f)
+        for f in os.listdir(module_name)
+        if f.endswith('.py')
+    ]
+
+
+# Cython compile options
+cython_compile_options = {
+    'language_level': '3',
+    'annotate': False,  # set to True to emit Cython HTML annotation files
+    'compiler_directives': {},
+}
+
+# Collect the .py sources to be compiled by Cython
+cython_files = []
+cython_files += add_py_files('copilot/app')
+cython_files += add_py_files('copilot/backends')
+cython_files += add_py_files('copilot/utilities')
+
+extensions = [Extension(f.replace("/", ".")[:-3], [f]) for f in cython_files]
+
+# setup() arguments
+setup(
+    name='copilot',
+    version='1.2.1',
+    description='openEuler Copilot System Command Line Assistant',
+    author='Hongyu Shi',
+    author_email='shihongyu15@huawei.com',
+    url='https://gitee.com/openeuler/euler-copilot-shell',
+    py_modules=['copilot.__init__', 'copilot.__main__'],
+    ext_modules=cythonize(
+        extensions,
+        compiler_directives=cython_compile_options['compiler_directives'],
+        annotate=cython_compile_options['annotate'],
+        language_level=cython_compile_options['language_level']
+    ),
+    packages=['copilot'],
+    cmdclass={'build_ext': build_ext},
+    include_package_data=True,
+    zip_safe=False,
+    classifiers=[
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Python :: 3.11',
+        'Programming Language :: Python :: 3.12',
+        'License :: OSI Approved :: Mulan Permissive Software License, Version 2',  # Mulan PSL v2
+        'Operating System :: POSIX :: Linux',
+        'Operating System :: MacOS :: MacOS X'
+    ],
+    python_requires='>=3.9',  # requires Python 3.9 or newer
+    install_requires=[  # runtime dependencies
+        'websockets',
+        'requests',
+        'rich',
+        'typer',
+        'questionary'
+    ],
+    entry_points={
+        'console_scripts': ['copilot=copilot.__main__:entry_point']
+    }
+)