diff --git a/src/oebuild/app/conf/plugins.yaml b/src/oebuild/app/conf/plugins.yaml index b44665a42d9c71cbcaf34c7e0379b2273c825fea..40ca2988a635612eb3e8542a744dfc74ce870bc5 100644 --- a/src/oebuild/app/conf/plugins.yaml +++ b/src/oebuild/app/conf/plugins.yaml @@ -23,3 +23,9 @@ plugins: - name: menv class: Menv path: plugins/m_env/m_env.py +- name: deploy-target + class: DeployTarget + path: plugins/deploy/deploy-target.py +- name: undeploy-target + class: UnDeployTarget + path: plugins/deploy/deploy-target.py diff --git a/src/oebuild/app/plugins/bitbake/base_build.py b/src/oebuild/app/plugins/bitbake/base_build.py index efc2b6b907ef765454ed9024c00624bf623e0f0e..6e79fed5fb60ad16da0fda495d79cc2e64da28ef 100644 --- a/src/oebuild/app/plugins/bitbake/base_build.py +++ b/src/oebuild/app/plugins/bitbake/base_build.py @@ -14,7 +14,6 @@ import os import oebuild.util as oebuild_util from oebuild.local_conf import LocalConf from oebuild.bblayers import BBLayers -import oebuild.app.plugins.bitbake.const as bitbake_const class BaseBuild: ''' diff --git a/src/oebuild/app/plugins/bitbake/const.py b/src/oebuild/app/plugins/bitbake/const.py index d6d8082fc7d102cb70cc0999c4f1123d7747285c..18f28e803a43b49ce03cf7be5036e9c33219158b 100644 --- a/src/oebuild/app/plugins/bitbake/const.py +++ b/src/oebuild/app/plugins/bitbake/const.py @@ -9,9 +9,6 @@ EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. ''' -CONTAINER_SRC = '/usr1/openeuler/src' -CONTAINER_BUILD = '/home/openeuler' -CONTAINER_USER = "openeuler" BASH_BANNER = ''' Welcome to the openEuler Embedded build environment, diff --git a/src/oebuild/app/plugins/bitbake/in_container.py b/src/oebuild/app/plugins/bitbake/in_container.py index 45fb09af69aa5517e2c89058e7f75e9a645dc0ce..24a49dc90f4950493e9523767fda8085f577c966 100644 --- a/src/oebuild/app/plugins/bitbake/in_container.py +++ b/src/oebuild/app/plugins/bitbake/in_container.py @@ -79,34 +79,25 @@ class InContainer(BaseBuild): cwd_name = os.path.basename(os.getcwd()) volumns = [] volumns.append("/dev/net/tun:/dev/net/tun") - volumns.append(self.configure.source_dir() + ':' + bitbake_const.CONTAINER_SRC) + volumns.append(self.configure.source_dir() + ':' + oebuild_util.CONTAINER_SRC) volumns.append(os.path.join(self.configure.build_dir(), cwd_name) + ':' + - os.path.join(bitbake_const.CONTAINER_BUILD, cwd_name)) + os.path.join(oebuild_util.CONTAINER_BUILD, cwd_name)) if toolchain_dir is not None: volumns.append(toolchain_dir + ":" + NATIVE_GCC_DIR) if sstate_cache is not None: volumns.append(sstate_cache + ":" + SSTATE_CACHE) - try: - env_container = EnvContainer( - volumns=volumns, - short_id="" - ) - check_container = env.is_same_container(data=env_container) - except Exception as e_p: - raise e_p - - if not check_container \ - or env.container.short_id is None \ - or not self.client.is_container_exists(env.container.short_id): + if env.container is None \ + or env.container.short_id is None \ + or not self.client.is_container_exists(env.container.short_id): # judge which container container:Container = self.client.container_run_simple( image=docker_image, volumes=volumns) # type: ignore - env_container.short_id = container.short_id + env_container = EnvContainer(container.short_id) env.set_env_container(env_container) env.export_env() @@ -122,7 +113,7 @@ class InContainer(BaseBuild): container:Container = self.client.get_container(self.container_id) # type: ignore 
self.init_bash(container=container, - build_dir=os.path.basename(os.getcwd())) + build_dir_name=os.path.basename(os.getcwd())) try: self.init_bitbake(container=container) @@ -134,7 +125,7 @@ class InContainer(BaseBuild): bblayers_dir = os.path.join(os.getcwd(), "conf", "bblayers.conf") self.add_bblayers( bblayers_dir=bblayers_dir, - pre_dir=bitbake_const.CONTAINER_SRC, + pre_dir=oebuild_util.CONTAINER_SRC, base_dir=self.configure.source_dir(), layers=parse_compile.layers) @@ -151,8 +142,8 @@ class InContainer(BaseBuild): res = self.client.container_exec_command( container=container, command="bash .bashrc", - user=bitbake_const.CONTAINER_USER, - work_space=f"/home/{bitbake_const.CONTAINER_USER}") + user=oebuild_util.CONTAINER_USER, + work_space=f"/home/{oebuild_util.CONTAINER_USER}") for line in res.output: logger.info(line.decode().strip('\n')) @@ -163,7 +154,7 @@ class InContainer(BaseBuild): content = self._add_bashrc(content=content, line=b_s) self.update_bashrc(container=container, content=content) os.system( - f"docker exec -it -u {bitbake_const.CONTAINER_USER} {container.short_id} bash") + f"docker exec -it -u {oebuild_util.CONTAINER_USER} {container.short_id} bash") self.restore_bashrc(container=container) @@ -178,9 +169,9 @@ class InContainer(BaseBuild): res = self.client.container_exec_command( container=container, - command=f"bash /home/{bitbake_const.CONTAINER_USER}/.bashrc", - user=bitbake_const.CONTAINER_USER, - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + command=f"bash /home/{oebuild_util.CONTAINER_USER}/.bashrc", + user=oebuild_util.CONTAINER_USER, + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False) if res.exit_code != 0: raise ValueError(res.output.decode()) @@ -190,8 +181,8 @@ class InContainer(BaseBuild): res = self.client.container_exec_command( container=container, user='root', - command=f"id {bitbake_const.CONTAINER_USER}", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + command=f"id {oebuild_util.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False) if res.exit_code != 0: raise ValueError("check docker user id faild") @@ -200,19 +191,19 @@ class InContainer(BaseBuild): cuids = res_cont.split(' ') # get uid from container in default user - pattern = re.compile(r'(?<=uid=)\d{1,}(?=\(' + bitbake_const.CONTAINER_USER + r'\))') + pattern = re.compile(r'(?<=uid=)\d{1,}(?=\(' + oebuild_util.CONTAINER_USER + r'\))') match_uid = pattern.search(cuids[0]) if match_uid: cuid = match_uid.group() else: - raise ValueError(f"can not get container {bitbake_const.CONTAINER_USER} uid") + raise ValueError(f"can not get container {oebuild_util.CONTAINER_USER} uid") # get gid from container in default user - pattern = re.compile(r'(?<=gid=)\d{1,}(?=\(' + bitbake_const.CONTAINER_USER + r'\))') + pattern = re.compile(r'(?<=gid=)\d{1,}(?=\(' + oebuild_util.CONTAINER_USER + r'\))') match_gid = pattern.search(cuids[1]) if match_gid: cgid = match_gid.group() else: - raise ValueError(f"can not get container {bitbake_const.CONTAINER_USER} gid") + raise ValueError(f"can not get container {oebuild_util.CONTAINER_USER} gid") # judge host uid and gid are same with container uid and gid # if not same and change container uid and gid equal to host's uid and gid @@ -225,16 +216,16 @@ class InContainer(BaseBuild): self.client.container_exec_command( container=container, user='root', - command=f"usermod -u {uid} {bitbake_const.CONTAINER_USER}", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + command=f"usermod -u {uid} 
{oebuild_util.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False) def _change_container_gid(self, container: Container, gid: int): self.client.container_exec_command( container=container, user='root', - command=f"groupmod -g {gid} {bitbake_const.CONTAINER_USER}", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + command=f"groupmod -g {gid} {oebuild_util.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False) def _install_sudo(self, container: Container): @@ -244,7 +235,7 @@ class InContainer(BaseBuild): container=container, user='root', command="which sudo", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False ) if resp.exit_code != 0: @@ -257,7 +248,7 @@ class InContainer(BaseBuild): container=container, user='root', command=r"sed -i 's/repo.openeuler.org/mirrors.huaweicloud.com\/openeuler/g' /etc/yum.repos.d/openEuler.repo", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=False ) @@ -266,13 +257,13 @@ class InContainer(BaseBuild): container=container, user='root', command=f"yum install {software} -y", - work_space=f"/home/{bitbake_const.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", stream=True ) for line in resp.output: logger.info(line.decode().strip('\n')) - def init_bash(self, container: Container, build_dir): + def init_bash(self, container: Container, build_dir_name): ''' Bitbake will initialize the compilation environment by reading the user initialization script first, then making directional @@ -282,9 +273,9 @@ class InContainer(BaseBuild): content = self._get_bashrc_content(container=container) init_sdk_command = '. /opt/buildtools/nativesdk/environment-setup-x86_64-pokysdk-linux' - set_template = f'export TEMPLATECONF="{bitbake_const.CONTAINER_SRC}/yocto-meta-openeuler/.oebuild"' - init_oe_comand = f'. {bitbake_const.CONTAINER_SRC}/yocto-poky/oe-init-build-env \ - {bitbake_const.CONTAINER_BUILD}/{build_dir}' + set_template = f'export TEMPLATECONF="{oebuild_util.CONTAINER_SRC}/yocto-meta-openeuler/.oebuild"' + init_oe_comand = f'. 
{oebuild_util.CONTAINER_SRC}/yocto-poky/oe-init-build-env \ + {oebuild_util.CONTAINER_BUILD}/{build_dir_name}' init_command = [init_sdk_command, set_template, init_oe_comand] new_content = self._init_bashrc_content(content, init_command) @@ -300,9 +291,9 @@ class InContainer(BaseBuild): self.client.copy_to_container( container=container, source_path=tmp_file, - to_path=f'/home/{bitbake_const.CONTAINER_USER}') + to_path=f'/home/{oebuild_util.CONTAINER_USER}') container.exec_run( - cmd=f"mv /home/{bitbake_const.CONTAINER_USER}/{tmp_file} /home/{bitbake_const.CONTAINER_USER}/.bashrc", + cmd=f"mv /home/{oebuild_util.CONTAINER_USER}/{tmp_file} /home/{oebuild_util.CONTAINER_USER}/.bashrc", user="root" ) os.remove(tmp_file) @@ -319,7 +310,7 @@ class InContainer(BaseBuild): def _get_bashrc_content(self, container: Container): content = self.client.container_exec_command( container=container, - command=f"cat /home/{bitbake_const.CONTAINER_USER}/.bashrc", + command=f"cat /home/{oebuild_util.CONTAINER_USER}/.bashrc", user="root", work_space=None, stream=False).output diff --git a/src/oebuild/app/plugins/deploy/__init__.py b/src/oebuild/app/plugins/deploy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/oebuild/app/plugins/deploy/com_target.py b/src/oebuild/app/plugins/deploy/com_target.py new file mode 100644 index 0000000000000000000000000000000000000000..a9907d105b9bf77b43833947d6c6a754c7c34526 --- /dev/null +++ b/src/oebuild/app/plugins/deploy/com_target.py @@ -0,0 +1,289 @@ +''' +Copyright (c) 2023 openEuler Embedded +oebuild is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+''' + +import os +import sys +import re +import logging + +from docker.models.containers import Container + +from oebuild.docker_proxy import DockerProxy +from oebuild.configure import Configure +import oebuild.util as oebuild_util +from oebuild.parse_compile import ParseCompile, CheckCompileError,BUILD_IN_DOCKER +from oebuild.parse_env import ParseEnv,EnvContainer +from oebuild.m_log import logger + +logger = logging.getLogger() + +TARGET_DIR_NAME = "target_dev" +TARGET_SCRIPT_NAME = "oebuild_dev" + +class ComTarget: + def __init__(self) -> None: + self.configure = Configure() + self.client:DockerProxy = None + self.container_id = None + self.work_dir = os.getcwd() + + def exec(self, str_args: str, fun): + + self.client = DockerProxy() + + if not self._check_compile_directory(): + logger.error("you must work in a compile directory") + sys.exit(-1) + + if not self._check_if_docker_compile(): + logger.error("the deploy function is only supported when building with docker") + sys.exit(-1) + + if not self._check_yocto_poky(): + logger.error("""please make sure the yocto-poky source code is in the src directory, or run: + oebuild update layer""") + sys.exit(-1) + + if not self._check_conf_directory(): + logger.error("you must work in an existing build directory, which means you have built before or at least initialized the environment") + sys.exit(-1) + + logger.info("initializing environment, please wait ...") + parse_env = ParseEnv(env_dir='.env') + self.deal_env_container(parse_env, oebuild_util.DEFAULT_DOCKER) + container:Container = self.client.get_container(self.container_id) + self._make_and_copy_lib(container=container) + self.bak_bash(container=container) + self.init_bash(container=container) + content = self._get_bashrc_content(container=container) + content = oebuild_util.add_bashrc(content=content, line=f"export PATH=$PATH:/home/openeuler/{TARGET_DIR_NAME}") + content = oebuild_util.add_bashrc(content=content, line=f"mv -f /home/{oebuild_util.CONTAINER_USER}/{self.old_bashrc} /home/{oebuild_util.CONTAINER_USER}/.bashrc") + content = oebuild_util.add_bashrc(content=content, line=f"{TARGET_SCRIPT_NAME} {fun} {str_args}") + print(f"{TARGET_SCRIPT_NAME} {str_args}") + content = oebuild_util.add_bashrc(content=content, line=f"rm -rf /home/openeuler/{TARGET_DIR_NAME} && exit") + self.update_bashrc(container=container, content=content) + os.system(f"docker exec -it -u {oebuild_util.CONTAINER_USER} {container.short_id} bash") + + def _check_conf_directory(self,): + # check if exists local.conf + if not os.path.exists(os.path.join(self.work_dir, "conf/local.conf")): + return False + # check if exists bblayers.conf + if not os.path.exists(os.path.join(self.work_dir, "conf/bblayers.conf")): + return False + return True + + def _check_if_docker_compile(self): + ''' + the deploy feature should only be run with the docker build type + ''' + compile_path = os.path.join(self.work_dir, "compile.yaml") + try: + parse_compile = ParseCompile(compile_path) + except CheckCompileError as c_e: + logger.error(str(c_e)) + sys.exit(-1) + if parse_compile.build_in != BUILD_IN_DOCKER: + return False + return True + + def _check_compile_directory(self,): + ''' + The execution of the bitbake instruction mainly relies + on compile.yaml, which is initialized by parsing the file + ''' + return os.path.exists(os.path.join(self.work_dir, 'compile.yaml')) + + def _check_yocto_poky(self,): + ''' + package deploy needs the poky lib, so detect whether yocto-poky is present + ''' + return os.path.exists(self.configure.source_poky_dir()) + + 
def _make_and_copy_lib(self, container:Container): + # every time, make sure the script is up to date, so remove any old copy before copying + container.exec_run(f"rm -rf /home/openeuler/{TARGET_DIR_NAME}") + # copy package lib to docker + curr_path = os.path.dirname(os.path.realpath(__file__)) + lib_path = os.path.join(curr_path, TARGET_DIR_NAME) + self.client.copy_to_container(container=container, source_path=lib_path, to_path="/home/openeuler/") + container.exec_run(f"chmod 755 /home/openeuler/{TARGET_DIR_NAME}/{TARGET_SCRIPT_NAME}") + + def deal_env_container(self, env: ParseEnv,docker_image:str): + ''' + This method manages the build container: it decides how + the container is handled by parsing the env file. If no + container is recorded, or the recorded container no + longer exists, a new container is created; otherwise + the existing (sleeping) container is started and + reused + ''' + cwd_name = os.path.basename(self.work_dir) + volumns = [] + volumns.append("/dev/net/tun:/dev/net/tun") + volumns.append(self.configure.source_dir() + ':' + oebuild_util.CONTAINER_SRC) + volumns.append(os.path.join(self.configure.build_dir(), cwd_name) + + ':' + + os.path.join(oebuild_util.CONTAINER_BUILD, cwd_name)) + + if env.container is None \ + or env.container.short_id is None \ + or not self.client.is_container_exists(env.container.short_id): + # judge which container + container:Container = self.client.container_run_simple( + image=docker_image, + volumes=volumns) # type: ignore + + env_container = EnvContainer(container.short_id) + env.set_env_container(env_container) + env.export_env() + + self.container_id = env.container.short_id + container:Container = self.client.get_container(self.container_id) # type: ignore + if not self.client.is_container_running(container): + self.client.start_container(container) + + def bak_bash(self, container: Container): + ''' + back up the default user's .bashrc under a random name so it can be restored later + ''' + old_bash = oebuild_util.generate_random_str(6) + self.client.container_exec_command( + container=container, + command=f"cp /home/{oebuild_util.CONTAINER_USER}/.bashrc /home/{oebuild_util.CONTAINER_USER}/{old_bash}", + user="root", + work_space=None, + stream=False) + self.old_bashrc = old_bash + + def init_bash(self, container: Container): + ''' + Bitbake will initialize the compilation environment by reading + the user initialization script first, then making directional + substitutions, and finally writing the initialization script + ''' + self._check_change_ugid(container=container) + # read container default user .bashrc content + content = self._get_bashrc_content(container=container) + init_sdk_command = f'. {oebuild_util.SDK_ABSOLATE_PATH}' + build_dir_name = os.path.basename(self.work_dir) + init_oe_command = f'. 
{oebuild_util.CONTAINER_SRC}/yocto-poky/oe-init-build-env \ + {oebuild_util.CONTAINER_BUILD}/{build_dir_name}' + init_command = [init_sdk_command, init_oe_command] + new_content = oebuild_util.init_bashrc_content(content, init_command) + self.update_bashrc(container=container, content=new_content) + + def _get_bashrc_content(self, container: Container): + content = self.client.container_exec_command( + container=container, + command=f"cat /home/{oebuild_util.CONTAINER_USER}/.bashrc", + user="root", + work_space=None, + stream=False).output + + return content.decode() + + def update_bashrc(self, container: Container, content: str): + ''' + update user initialization script by replace file, first create + a file and writed content and copy it to container's .bashrc, finally + remove it + ''' + tmp_file = self._set_tmpfile_content(content) + self.client.copy_to_container( + container=container, + source_path=tmp_file, + to_path=f'/home/{oebuild_util.CONTAINER_USER}') + container.exec_run( + cmd=f"mv /home/{oebuild_util.CONTAINER_USER}/{tmp_file} /home/{oebuild_util.CONTAINER_USER}/.bashrc", + user="root" + ) + os.remove(tmp_file) + + def _set_tmpfile_content(self, content: str): + while True: + tmp_file = oebuild_util.generate_random_str(6) + if os.path.exists(tmp_file): + continue + with open(tmp_file, 'w', encoding="utf-8") as w_f: + w_f.write(content) + break + return tmp_file + + def restore_bashrc(self, container: Container): + ''' + Restoring .bashrc will strip out the command line + content added during bitbake initialization + ''' + old_content = self._get_bashrc_content(container=container) + self.update_bashrc(container=container, + content=self._restore_bashrc_content(old_content=old_content)) + + def _restore_bashrc_content(self, old_content): + new_content = '' + for line in old_content.split('\n'): + line: str = line + if line.endswith(oebuild_util.BASH_END_FLAG) or line.replace(" ", '') == '': + continue + new_content = new_content + line + '\n' + return new_content + + def _check_change_ugid(self, container: Container): + res = self.client.container_exec_command( + container=container, + user='root', + command=f"id {oebuild_util.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", + stream=False) + if res.exit_code != 0: + raise ValueError("check docker user id faild") + + res_cont:str = res.output.decode() + + cuids = res_cont.split(' ') + # get uid from container in default user + pattern = re.compile(r'(?<=uid=)\d{1,}(?=\(' + oebuild_util.CONTAINER_USER + r'\))') + match_uid = pattern.search(cuids[0]) + if match_uid: + cuid = match_uid.group() + else: + raise ValueError(f"can not get container {oebuild_util.CONTAINER_USER} uid") + # get gid from container in default user + pattern = re.compile(r'(?<=gid=)\d{1,}(?=\(' + oebuild_util.CONTAINER_USER + r'\))') + match_gid = pattern.search(cuids[1]) + if match_gid: + cgid = match_gid.group() + else: + raise ValueError(f"can not get container {oebuild_util.CONTAINER_USER} gid") + + # judge host uid and gid are same with container uid and gid + # if not same and change container uid and gid equal to host's uid and gid + if os.getuid() != cuid: + self._change_container_uid(container=container, uid=os.getuid()) + if os.getgid() != cgid: + self._change_container_gid(container=container, gid=os.getgid()) + + def _change_container_uid(self, container: Container, uid: int): + self.client.container_exec_command( + container=container, + user='root', + command=f"usermod -u {uid} {oebuild_util.CONTAINER_USER}", + 
work_space=f"/home/{oebuild_util.CONTAINER_USER}", + stream=False) + + def _change_container_gid(self, container: Container, gid: int): + self.client.container_exec_command( + container=container, + user='root', + command=f"groupmod -g {gid} {oebuild_util.CONTAINER_USER}", + work_space=f"/home/{oebuild_util.CONTAINER_USER}", + stream=False) \ No newline at end of file diff --git a/src/oebuild/app/plugins/deploy/deploy-image.py b/src/oebuild/app/plugins/deploy/deploy-image.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/oebuild/app/plugins/deploy/deploy-target.py b/src/oebuild/app/plugins/deploy/deploy-target.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe8e1c6de093caef67101fd808e1c84cd586fd9 --- /dev/null +++ b/src/oebuild/app/plugins/deploy/deploy-target.py @@ -0,0 +1,137 @@ +''' +Copyright (c) 2023 openEuler Embedded +oebuild is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +''' + +import os +import sys +import argparse +import re +import textwrap +import logging + + +from oebuild.command import OebuildCommand +from oebuild.app.plugins.deploy.com_target import ComTarget + +logger = logging.getLogger() + +class DeployTarget(OebuildCommand): + ''' + we use package in a + ''' + def __init__(self) -> None: + super().__init__( + '{}', + 'deploy software on line', + textwrap.dedent('''\ +Deploys a recipe's build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being +overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have +been installed on the target beforehand. +''' + )) + + def do_add_parser(self, parser_adder) -> argparse.ArgumentParser: + parser = self._parser( + parser_adder, + usage=''' +oebuild deploy-target [-h] [-c] [-s] [-n] [-p] [--no-check-space] [-e SSH_EXEC] [-P PORT] [-I KEY] [-S | --no-strip] recipename target +''') + + return parser + + def do_run(self, args: argparse.Namespace, unknown = None): + if '-h' in unknown or '--help' in unknown: + self.print_help_msg() + return + str_args = ' '.join(unknown) + com_target = ComTarget() + com_target.exec(str_args=str_args, fun="deploy-target") + + def print_help_msg(self,): + print(""" +usage: oebuild deploy-target [-h] [-c] [-s] [-n] [-p] [--no-check-space] [-e SSH_EXEC] [-P PORT] [-I KEY] [-S | --no-strip] recipename target + +Deploys a recipe's build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being +overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have +been installed on the target beforehand. 
+ +arguments: + recipename Recipe to deploy + target Live target machine running an ssh server: user@hostname[:destdir] + +options: + -h, --help show this help message and exit + -c, --no-host-check Disable ssh host key checking + -s, --show-status Show progress/status output + -n, --dry-run List files to be deployed only + -p, --no-preserve Do not preserve existing files + --no-check-space Do not check for available space before deploying + -e SSH_EXEC, --ssh-exec SSH_EXEC + Executable to use in place of ssh + -P PORT, --port PORT Specify port to use for connection to the target + -I KEY, --key KEY Specify ssh private key for connection to the target + -S, --strip Strip executables prior to deploying (default: False). The default value of this option can be controlled by setting the strip option in the [Deploy] + section to True or False. + --no-strip Do not strip executables prior to deploy +""") + +class UnDeployTarget(OebuildCommand): + ''' + we use package in a + ''' + def __init__(self) -> None: + super().__init__( + '{}', + 'undeploy software on line', + textwrap.dedent('''\ +Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target. +''' + )) + + def do_add_parser(self, parser_adder) -> argparse.ArgumentParser: + parser = self._parser( + parser_adder, + usage=''' +oebuild undeploy-target [-h] [-c] [-s] [-a] [-n] [-e SSH_EXEC] [-P PORT] [-I KEY] [recipename] target +''') + + return parser + + def do_run(self, args: argparse.Namespace, unknown = None): + if '-h' in unknown or '--help' in unknown: + self.print_help_msg() + return + str_args = ' '.join(unknown) + com_target = ComTarget() + com_target.exec(str_args=str_args, fun="undeploy-target") + + def print_help_msg(self): + print(""" + +usage: oebuild undeploy-target [-h] [-c] [-s] [-a] [-n] [-e SSH_EXEC] [-P PORT] [-I KEY] [recipename] target + +Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target. 
+ +arguments: + recipename Recipe to undeploy (if not using -a/--all) + target Live target machine running an ssh server: user@hostname + +options: + -h, --help show this help message and exit + -c, --no-host-check Disable ssh host key checking + -s, --show-status Show progress/status output + -a, --all Undeploy all recipes deployed on the target + -n, --dry-run List files to be undeployed only + -e SSH_EXEC, --ssh-exec SSH_EXEC + Executable to use in place of ssh + -P PORT, --port PORT Specify port to use for connection to the target + -I KEY, --key KEY Specify ssh private key for connection to the target +""") diff --git a/src/oebuild/app/plugins/deploy/target_dev/deploy.py b/src/oebuild/app/plugins/deploy/target_dev/deploy.py new file mode 100644 index 0000000000000000000000000000000000000000..f799cb177f59467d7aaff1300daf9bdeb8d2d18b --- /dev/null +++ b/src/oebuild/app/plugins/deploy/target_dev/deploy.py @@ -0,0 +1,366 @@ +# Development tool - deploy/undeploy command plugin +# +# Copyright (C) 2014-2016 Intel Corporation +# +# SPDX-License-Identifier: GPL-2.0-only +# +"""Devtool plugin containing the deploy subcommands""" + +import logging +import os +import shutil +import subprocess +import tempfile + +import bb.utils +import argparse_oe +import oe.types + +from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError + +logger = logging.getLogger('devtool') + +deploylist_path = '/.devtool' + +def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=False, nopreserve=False, nocheckspace=False): + """ + Prepare a shell script for running on the target to + deploy/undeploy files. We have to be careful what we put in this + script - only commands that are likely to be available on the + target are suitable (the target might be constrained, e.g. using + busybox rather than bash with coreutils). + """ + lines = [] + lines.append('#!/bin/sh') + lines.append('set -e') + if undeployall: + # Yes, I know this is crude - but it does work + lines.append('for entry in %s/*.list; do' % deploylist_path) + lines.append('[ ! -f $entry ] && exit') + lines.append('set `basename $entry | sed "s/.list//"`') + if dryrun: + if not deploy: + lines.append('echo "Previously deployed files for $1:"') + lines.append('manifest="%s/$1.list"' % deploylist_path) + lines.append('preservedir="%s/$1.preserve"' % deploylist_path) + lines.append('if [ -f $manifest ] ; then') + # Read manifest in reverse and delete files / remove empty dirs + lines.append(' sed \'1!G;h;$!d\' $manifest | while read file') + lines.append(' do') + if dryrun: + lines.append(' if [ ! -d $file ] ; then') + lines.append(' echo $file') + lines.append(' fi') + else: + lines.append(' if [ -d $file ] ; then') + # Avoid deleting a preserved directory in case it has special perms + lines.append(' if [ ! 
-d $preservedir/$file ] ; then') + lines.append(' rmdir $file > /dev/null 2>&1 || true') + lines.append(' fi') + lines.append(' else') + lines.append(' rm -f $file') + lines.append(' fi') + lines.append(' done') + if not dryrun: + lines.append(' rm $manifest') + if not deploy and not dryrun: + # May as well remove all traces + lines.append(' rmdir `dirname $manifest` > /dev/null 2>&1 || true') + lines.append('fi') + + if deploy: + if not nocheckspace: + # Check for available space + # FIXME This doesn't take into account files spread across multiple + # partitions, but doing that is non-trivial + # Find the part of the destination path that exists + lines.append('checkpath="$2"') + lines.append('while [ "$checkpath" != "/" ] && [ ! -e $checkpath ]') + lines.append('do') + lines.append(' checkpath=`dirname "$checkpath"`') + lines.append('done') + lines.append(r'freespace=$(df -P $checkpath | sed -nre "s/^(\S+\s+){3}([0-9]+).*/\2/p")') + # First line of the file is the total space + lines.append('total=`head -n1 $3`') + lines.append('if [ $total -gt $freespace ] ; then') + lines.append(' echo "ERROR: insufficient space on target (available ${freespace}, needed ${total})"') + lines.append(' exit 1') + lines.append('fi') + if not nopreserve: + # Preserve any files that exist. Note that this will add to the + # preserved list with successive deployments if the list of files + # deployed changes, but because we've deleted any previously + # deployed files at this point it will never preserve anything + # that was deployed, only files that existed prior to any deploying + # (which makes the most sense) + lines.append('cat $3 | sed "1d" | while read file fsize') + lines.append('do') + lines.append(' if [ -e $file ] ; then') + lines.append(' dest="$preservedir/$file"') + lines.append(' mkdir -p `dirname $dest`') + lines.append(' mv $file $dest') + lines.append(' fi') + lines.append('done') + lines.append('rm $3') + lines.append('mkdir -p `dirname $manifest`') + lines.append('mkdir -p $2') + if verbose: + lines.append(' tar xv -C $2 -f - | tee $manifest') + else: + lines.append(' tar xv -C $2 -f - > $manifest') + lines.append('sed -i "s!^./!$2!" $manifest') + elif not dryrun: + # Put any preserved files back + lines.append('if [ -d $preservedir ] ; then') + lines.append(' cd $preservedir') + # find from busybox might not have -exec, so we don't use that + lines.append(' find . 
-type f | while read file') + lines.append(' do') + lines.append(' mv $file /$file') + lines.append(' done') + lines.append(' cd /') + lines.append(' rm -rf $preservedir') + lines.append('fi') + + if undeployall: + if not dryrun: + lines.append('echo "NOTE: Successfully undeployed $1"') + lines.append('done') + + # Delete the script itself + lines.append('rm $0') + lines.append('') + + return '\n'.join(lines) + + + +def deploy(args, config, basepath, workspace): + """Entry point for the devtool 'deploy' subcommand""" + import math + import oe.recipeutils + import oe.package + + # check_workspace_recipe(workspace, args.recipename, checksrc=False) + + try: + host, destdir = args.target.split(':') + except ValueError: + destdir = '/' + else: + args.target = host + if not destdir.endswith('/'): + destdir += '/' + + tinfoil = setup_tinfoil(basepath=basepath) + try: + try: + rd = tinfoil.parse_recipe(args.recipename) + except Exception as e: + raise DevtoolError('Exception parsing recipe %s: %s' % + (args.recipename, e)) + recipe_outdir = rd.getVar('D') + if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir): + raise DevtoolError('No files to deploy - have you built the %s ' + 'recipe? If so, the install step has not installed ' + 'any files.' % args.recipename) + + if args.strip and not args.dry_run: + # Fakeroot copy to new destination + srcdir = recipe_outdir + recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'devtool-deploy-target-stripped') + if os.path.isdir(recipe_outdir): + exec_fakeroot(rd, "rm -rf %s" % recipe_outdir, shell=True) + exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True) + os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or '']) + oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'), + rd.getVar('base_libdir'), rd) + + filelist = [] + inodes = set({}) + ftotalsize = 0 + for root, _, files in os.walk(recipe_outdir): + for fn in files: + fstat = os.lstat(os.path.join(root, fn)) + # Get the size in kiB (since we'll be comparing it to the output of du -k) + # MUST use lstat() here not stat() or getfilesize() since we don't want to + # dereference symlinks + if fstat.st_ino in inodes: + fsize = 0 + else: + fsize = int(math.ceil(float(fstat.st_size)/1024)) + inodes.add(fstat.st_ino) + ftotalsize += fsize + # The path as it would appear on the target + fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn) + filelist.append((fpath, fsize)) + + if args.dry_run: + print('Files to be deployed for %s on target %s:' % (args.recipename, args.target)) + for item, _ in filelist: + print(' %s' % item) + return 0 + + extraoptions = '' + if args.no_host_check: + extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + if not args.show_status: + extraoptions += ' -q' + + scp_sshexec = '' + ssh_sshexec = 'ssh' + if args.ssh_exec: + scp_sshexec = "-S %s" % args.ssh_exec + ssh_sshexec = args.ssh_exec + scp_port = '' + ssh_port = '' + if args.port: + scp_port = "-P %s" % args.port + ssh_port = "-p %s" % args.port + + if args.key: + extraoptions += ' -i %s' % args.key + + # In order to delete previously deployed files and have the manifest file on + # the target, we write out a shell script and then copy it to the target + # so we can then run it (piping tar output to it). + # (We cannot use scp here, because it doesn't preserve symlinks.) 
+ tmpdir = tempfile.mkdtemp(prefix='devtool') + try: + tmpscript = '/tmp/devtool_deploy.sh' + tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list') + shellscript = _prepare_remote_script(deploy=True, + verbose=args.show_status, + nopreserve=args.no_preserve, + nocheckspace=args.no_check_space) + # Write out the script to a file + with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f: + f.write(shellscript) + # Write out the file list + with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f: + f.write('%d\n' % ftotalsize) + for fpath, fsize in filelist: + f.write('%s %d\n' % (fpath, fsize)) + # Copy them to the target + ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True) + if ret != 0: + raise DevtoolError('Failed to copy script to %s - rerun with -s to ' + 'get a complete error message' % args.target) + finally: + shutil.rmtree(tmpdir) + + # Now run the script + ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True) + if ret != 0: + raise DevtoolError('Deploy failed - rerun with -s to get a complete ' + 'error message') + + logger.info('Successfully deployed %s' % recipe_outdir) + + files_list = [] + for root, _, files in os.walk(recipe_outdir): + for filename in files: + filename = os.path.relpath(os.path.join(root, filename), recipe_outdir) + files_list.append(os.path.join(destdir, filename)) + finally: + tinfoil.shutdown() + + return 0 + +def undeploy(args, config, basepath, workspace): + """Entry point for the devtool 'undeploy' subcommand""" + if args.all and args.recipename: + raise argparse_oe.ArgumentUsageError('Cannot specify -a/--all with a recipe name', 'undeploy-target') + elif not args.recipename and not args.all: + raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target') + + extraoptions = '' + if args.no_host_check: + extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + if not args.show_status: + extraoptions += ' -q' + + scp_sshexec = '' + ssh_sshexec = 'ssh' + if args.ssh_exec: + scp_sshexec = "-S %s" % args.ssh_exec + ssh_sshexec = args.ssh_exec + scp_port = '' + ssh_port = '' + if args.port: + scp_port = "-P %s" % args.port + ssh_port = "-p %s" % args.port + + args.target = args.target.split(':')[0] + + tmpdir = tempfile.mkdtemp(prefix='devtool') + try: + tmpscript = '/tmp/devtool_undeploy.sh' + shellscript = _prepare_remote_script(deploy=False, dryrun=args.dry_run, undeployall=args.all) + # Write out the script to a file + with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f: + f.write(shellscript) + # Copy it to the target + ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True) + if ret != 0: + raise DevtoolError('Failed to copy script to %s - rerun with -s to ' + 'get a complete error message' % args.target) + finally: + shutil.rmtree(tmpdir) + + # Now run the script + ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True) + if ret != 0: + raise DevtoolError('Undeploy failed - rerun with -s to get a complete ' + 'error message') + + if not args.all and not args.dry_run: + 
logger.info('Successfully undeployed %s' % args.recipename) + return 0 + + +def register_commands(subparsers, context): + """Register devtool subcommands from the deploy plugin""" + + parser_deploy = subparsers.add_parser('deploy-target', + help='Deploy recipe output files to live target machine', + description='Deploys a recipe\'s build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have been installed on the target beforehand.', + group='testbuild') + parser_deploy.add_argument('recipename', help='Recipe to deploy') + parser_deploy.add_argument('target', help='Live target machine running an ssh server: user@hostname[:destdir]') + parser_deploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true') + parser_deploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true') + parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true') + parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true') + parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true') + parser_deploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh') + parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target') + parser_deploy.add_argument('-I', '--key', + help='Specify ssh private key for connection to the target') + + strip_opts = parser_deploy.add_mutually_exclusive_group(required=False) + strip_opts.add_argument('-S', '--strip', + help='Strip executables prior to deploying (default: %(default)s). 
' + 'The default value of this option can be controlled by setting the strip option in the [Deploy] section to True or False.', + default=oe.types.boolean(context.config.get('Deploy', 'strip', default='0')), + action='store_true') + strip_opts.add_argument('--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false') + + parser_deploy.set_defaults(func=deploy) + + parser_undeploy = subparsers.add_parser('undeploy-target', + help='Undeploy recipe output files in live target machine', + description='Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target.', + group='testbuild') + parser_undeploy.add_argument('recipename', help='Recipe to undeploy (if not using -a/--all)', nargs='?') + parser_undeploy.add_argument('target', help='Live target machine running an ssh server: user@hostname') + parser_undeploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true') + parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true') + parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true') + parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true') + parser_undeploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh') + parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target') + parser_undeploy.add_argument('-I', '--key', + help='Specify ssh private key for connection to the target') + + parser_undeploy.set_defaults(func=undeploy) diff --git a/src/oebuild/app/plugins/deploy/target_dev/oebuild_dev b/src/oebuild/app/plugins/deploy/target_dev/oebuild_dev new file mode 100755 index 0000000000000000000000000000000000000000..bfaeaf04753739ea1aed4b1418743b58df3a5e9e --- /dev/null +++ b/src/oebuild/app/plugins/deploy/target_dev/oebuild_dev @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 + +# OpenEmbedded Development tool +# +# Copyright (C) 2014-2015 Intel Corporation +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import sys +import os +import argparse +import glob +import re +import configparser +import subprocess +import logging + +basepath = '' +workspace = {} +config = None +context = None + + +scripts_path = os.path.dirname(os.path.realpath(__file__)) +lib_path = "/usr1/openeuler/src/yocto-poky/scripts/lib" +sys.path = sys.path + [lib_path] +from devtool import DevtoolError, setup_tinfoil +import scriptutils +import argparse_oe +logger = scriptutils.logger_create('devtool') + +plugins = [] + + +class ConfigHandler(object): + config_file = '' + config_obj = None + init_path = '' + workspace_path = '' + + def __init__(self, filename): + self.config_file = filename + self.config_obj = configparser.ConfigParser() + + def get(self, section, option, default=None): + try: + ret = self.config_obj.get(section, option) + except (configparser.NoOptionError, configparser.NoSectionError): + if default != None: + ret = default + else: + raise + return ret + + def read(self): + if os.path.exists(self.config_file): + self.config_obj.read(self.config_file) + + if self.config_obj.has_option('General', 'init_path'): + pth = self.get('General', 'init_path') + self.init_path = os.path.join(basepath, pth) + if not os.path.exists(self.init_path): + logger.error('init_path %s specified in config file cannot be found' % pth) + return False + else: + self.config_obj.add_section('General') + 
+ self.workspace_path = self.get('General', 'workspace_path', os.path.join(basepath, 'workspace')) + return True + + + def write(self): + logger.debug('writing to config file %s' % self.config_file) + self.config_obj.set('General', 'workspace_path', self.workspace_path) + with open(self.config_file, 'w') as f: + self.config_obj.write(f) + + def set(self, section, option, value): + if not self.config_obj.has_section(section): + self.config_obj.add_section(section) + self.config_obj.set(section, option, value) + +class Context: + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +def read_workspace(): + global workspace + workspace = {} + if not os.path.exists(os.path.join(config.workspace_path, 'conf', 'layer.conf')): + if context.fixed_setup: + logger.error("workspace layer not set up") + sys.exit(1) + else: + logger.info('Creating workspace layer in %s' % config.workspace_path) + _create_workspace(config.workspace_path, config, basepath) + if not context.fixed_setup: + _enable_workspace_layer(config.workspace_path, config, basepath) + + logger.debug('Reading workspace in %s' % config.workspace_path) + externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$') + for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')): + with open(fn, 'r') as f: + pnvalues = {} + for line in f: + res = externalsrc_re.match(line.rstrip()) + if res: + recipepn = os.path.splitext(os.path.basename(fn))[0].split('_')[0] + pn = res.group(2) or recipepn + # Find the recipe file within the workspace, if any + bbfile = os.path.basename(fn).replace('.bbappend', '.bb').replace('%', '*') + recipefile = glob.glob(os.path.join(config.workspace_path, + 'recipes', + recipepn, + bbfile)) + if recipefile: + recipefile = recipefile[0] + pnvalues['srctree'] = res.group(3) + pnvalues['bbappend'] = fn + pnvalues['recipefile'] = recipefile + elif line.startswith('# srctreebase: '): + pnvalues['srctreebase'] = line.split(':', 1)[1].strip() + if pnvalues: + if not pnvalues.get('srctreebase', None): + pnvalues['srctreebase'] = pnvalues['srctree'] + logger.debug('Found recipe %s' % pnvalues) + workspace[pn] = pnvalues + +def create_workspace(args, config, basepath, workspace): + if args.layerpath: + workspacedir = os.path.abspath(args.layerpath) + else: + workspacedir = os.path.abspath(os.path.join(basepath, 'workspace')) + _create_workspace(workspacedir, config, basepath) + if not args.create_only: + _enable_workspace_layer(workspacedir, config, basepath) + +def _create_workspace(workspacedir, config, basepath): + import bb + + confdir = os.path.join(workspacedir, 'conf') + if os.path.exists(os.path.join(confdir, 'layer.conf')): + logger.info('Specified workspace already set up, leaving as-is') + else: + # Add a config file + bb.utils.mkdirhier(confdir) + with open(os.path.join(confdir, 'layer.conf'), 'w') as f: + f.write('# ### workspace layer auto-generated by devtool ###\n') + f.write('BBPATH =. 
"$' + '{LAYERDIR}:"\n') + f.write('BBFILES += "$' + '{LAYERDIR}/recipes/*/*.bb \\\n') + f.write(' $' + '{LAYERDIR}/appends/*.bbappend"\n') + f.write('BBFILE_COLLECTIONS += "workspacelayer"\n') + f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n') + f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n') + f.write('BBFILE_PRIORITY_workspacelayer = "99"\n') + f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n') + # Add a README file + with open(os.path.join(workspacedir, 'README'), 'w') as f: + f.write('This layer was created by the OpenEmbedded devtool utility in order to\n') + f.write('contain recipes and bbappends that are currently being worked on. The idea\n') + f.write('is that the contents is temporary - once you have finished working on a\n') + f.write('recipe you use the appropriate method to move the files you have been\n') + f.write('working on to a proper layer. In most instances you should use the\n') + f.write('devtool utility to manage files within it rather than modifying files\n') + f.write('directly (although recipes added with "devtool add" will often need\n') + f.write('direct modification.)\n') + f.write('\nIf you no longer need to use devtool or the workspace layer\'s contents\n') + f.write('you can remove the path to this workspace layer from your conf/bblayers.conf\n') + f.write('file (and then delete the layer, if you wish).\n') + f.write('\nNote that by default, if devtool fetches and unpacks source code, it\n') + f.write('will place it in a subdirectory of a "sources" subdirectory of the\n') + f.write('layer. If you prefer it to be elsewhere you can specify the source\n') + f.write('tree path on the command line.\n') + +def _enable_workspace_layer(workspacedir, config, basepath): + """Ensure the workspace layer is in bblayers.conf""" + import bb + bblayers_conf = os.path.join(basepath, 'conf', 'bblayers.conf') + if not os.path.exists(bblayers_conf): + logger.error('Unable to find bblayers.conf') + return + if os.path.abspath(workspacedir) != os.path.abspath(config.workspace_path): + removedir = config.workspace_path + else: + removedir = None + _, added = bb.utils.edit_bblayers_conf(bblayers_conf, workspacedir, removedir) + if added: + logger.info('Enabling workspace layer in bblayers.conf') + if config.workspace_path != workspacedir: + # Update our config to point to the new location + config.workspace_path = workspacedir + config.write() + + +def main(): + global basepath + global config + global context + + if sys.getfilesystemencoding() != "utf-8": + sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.") + + context = Context(fixed_setup=False) + + # Default basepath + basepath = os.path.dirname(os.path.abspath(__file__)) + + parser = argparse_oe.ArgumentParser(description="OpenEmbedded development tool", + add_help=False, + epilog="Use %(prog)s --help to get help on a specific command") + parser.add_argument('--basepath', help='Base directory of SDK / build directory') + parser.add_argument('--bbpath', help='Explicitly specify the BBPATH, rather than getting it from the metadata') + parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true') + parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true') + parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', 
metavar='COLOR') + + global_args, unparsed_args = parser.parse_known_args() + + # Help is added here rather than via add_help=True, as we don't want it to + # be handled by parse_known_args() + parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, + help='show this help message and exit') + + if global_args.debug: + logger.setLevel(logging.DEBUG) + elif global_args.quiet: + logger.setLevel(logging.ERROR) + + if global_args.basepath: + # Override + basepath = global_args.basepath + if os.path.exists(os.path.join(basepath, '.devtoolbase')): + context.fixed_setup = True + else: + pth = basepath + while pth != '' and pth != os.sep: + if os.path.exists(os.path.join(pth, '.devtoolbase')): + context.fixed_setup = True + basepath = pth + break + pth = os.path.dirname(pth) + + if not context.fixed_setup: + basepath = os.environ.get('BUILDDIR') + if not basepath: + logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)") + sys.exit(1) + + logger.debug('Using basepath %s' % basepath) + + config = ConfigHandler(os.path.join(basepath, 'conf', 'devtool.conf')) + if not config.read(): + return -1 + context.config = config + + bitbake_subdir = config.get('General', 'bitbake_subdir', '') + if bitbake_subdir: + # Normally set for use within the SDK + logger.debug('Using bitbake subdir %s' % bitbake_subdir) + sys.path.insert(0, os.path.join(basepath, bitbake_subdir, 'lib')) + core_meta_subdir = config.get('General', 'core_meta_subdir') + sys.path.insert(0, os.path.join(basepath, core_meta_subdir, 'lib')) + else: + # Standard location + import scriptpath + bitbakepath = scriptpath.add_bitbake_lib_path() + if not bitbakepath: + logger.error("Unable to find bitbake by searching parent directory of this script or PATH") + sys.exit(1) + logger.debug('Using standard bitbake path %s' % bitbakepath) + scriptpath.add_oe_lib_path() + + scriptutils.logger_setup_color(logger, global_args.color) + + if global_args.bbpath is None: + try: + tinfoil = setup_tinfoil(config_only=True, basepath=basepath) + try: + global_args.bbpath = tinfoil.config_data.getVar('BBPATH') + finally: + tinfoil.shutdown() + except bb.BBHandledException: + return 2 + # Search BBPATH first to allow layers to override plugins in scripts_path + scriptutils.load_plugins(logger, plugins, scripts_path) + subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='') + subparsers.required = True + + subparsers.add_subparser_group('sdk', 'SDK maintenance', -2) + subparsers.add_subparser_group('advanced', 'Advanced', -1) + subparsers.add_subparser_group('starting', 'Beginning work on a recipe', 100) + subparsers.add_subparser_group('info', 'Getting information') + subparsers.add_subparser_group('working', 'Working on a recipe in the workspace') + subparsers.add_subparser_group('testbuild', 'Testing changes on target') + + if not context.fixed_setup: + parser_create_workspace = subparsers.add_parser('create-workspace', + help='Set up workspace in an alternative location', + description='Sets up a new workspace. 
NOTE: other devtool subcommands will create a workspace automatically as needed, so you only need to use %(prog)s if you want to specify where the workspace should be located.', + group='advanced') + parser_create_workspace.add_argument('layerpath', nargs='?', help='Path in which the workspace layer should be created') + parser_create_workspace.add_argument('--create-only', action="store_true", help='Only create the workspace layer, do not alter configuration') + parser_create_workspace.set_defaults(func=create_workspace, no_workspace=True) + + for plugin in plugins: + if hasattr(plugin, 'register_commands'): + plugin.register_commands(subparsers, context) + + args = parser.parse_args(unparsed_args, namespace=global_args) + + try: + ret = args.func(args, config, basepath, workspace) + except DevtoolError as err: + if str(err): + logger.error(str(err)) + ret = err.exitcode + except argparse_oe.ArgumentUsageError as ae: + parser.error_subcommand(ae.message, ae.subcommand) + + return ret + + +if __name__ == "__main__": + try: + ret = main() + except Exception: + ret = 1 + import traceback + traceback.print_exc() + sys.exit(ret) diff --git a/src/oebuild/app/plugins/run_qemu/run_qemu.py b/src/oebuild/app/plugins/run_qemu/run_qemu.py index 96b3d50b4f5bf430ff295586363d6b5baa4cc855..b48937e06e4abb767d5bba55dd5bd2fda70d3829 100644 --- a/src/oebuild/app/plugins/run_qemu/run_qemu.py +++ b/src/oebuild/app/plugins/run_qemu/run_qemu.py @@ -24,11 +24,6 @@ from oebuild.command import OebuildCommand from oebuild.configure import Configure from oebuild.m_log import logger -CONTAINER_BUILD = '/home/openeuler/build' -DEFAULT_DOCKER = "swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-container:latest" -CONTAINER_SRC = '/usr1/openeuler/src' -CONTAINER_USER = "openeuler" - class RunQemu(OebuildCommand): ''' The command for run in qemu platform. @@ -108,12 +103,12 @@ the container {self.container_id} failed to be destroyed, please run self.bak_bash(container=container) self.init_bash(container=container) content = self._get_bashrc_content(container=container) - qemu_helper_usr = CONTAINER_BUILD+"/tmp/work/x86_64-linux/qemu-helper-native/1.0-r1/recipe-sysroot-native/usr" - qemu_helper_dir = CONTAINER_BUILD+"/tmp/work/x86_64-linux/qemu-helper-native" + qemu_helper_usr = oebuild_util.CONTAINER_BUILD+"/tmp/work/x86_64-linux/qemu-helper-native/1.0-r1/recipe-sysroot-native/usr" + qemu_helper_dir = oebuild_util.CONTAINER_BUILD+"/tmp/work/x86_64-linux/qemu-helper-native" STAGING_BINDIR_NATIVE = f""" if [ ! 
-d {qemu_helper_usr} ];then mkdir -p {qemu_helper_usr} - chown -R {CONTAINER_USER}:{CONTAINER_USER} {qemu_helper_dir} + chown -R {oebuild_util.CONTAINER_USER}:{oebuild_util.CONTAINER_USER} {qemu_helper_dir} ln -s /opt/buildtools/nativesdk/sysroots/x86_64-pokysdk-linux/usr/bin {qemu_helper_usr} fi """ @@ -160,8 +155,8 @@ now, you can continue run `oebuild runqemu` in compile directory volumns = [] volumns.append("/dev/net/tun:/dev/net/tun") volumns.append("/etc/qemu-ifup:/etc/qemu-ifup") - volumns.append(self.work_dir + ':' + CONTAINER_BUILD) - volumns.append(self.configure.source_dir() + ':' + CONTAINER_SRC) + volumns.append(self.work_dir + ':' + oebuild_util.CONTAINER_BUILD) + volumns.append(self.configure.source_dir() + ':' + oebuild_util.CONTAINER_SRC) container:Container = self.client.container_run_simple( image=docker_image, volumes=volumns, @@ -176,7 +171,7 @@ now, you can continue run `oebuild runqemu` in compile directory ''' this is function is to get openeuler docker image automatic ''' - return DEFAULT_DOCKER + return oebuild_util.DEFAULT_DOCKER def bak_bash(self, container: Container): ''' @@ -201,7 +196,7 @@ now, you can continue run `oebuild runqemu` in compile directory content = self._get_bashrc_content(container=container) init_sdk_command = '. /opt/buildtools/nativesdk/environment-setup-x86_64-pokysdk-linux' - init_oe_command = f'. {CONTAINER_SRC}/yocto-poky/oe-init-build-env {CONTAINER_BUILD}' + init_oe_command = f'. {oebuild_util.CONTAINER_SRC}/yocto-poky/oe-init-build-env {oebuild_util.CONTAINER_BUILD}' init_command = [init_sdk_command, init_oe_command] new_content = oebuild_util.init_bashrc_content(content, init_command) self.update_bashrc(container=container, content=new_content) diff --git a/src/oebuild/parse_env.py b/src/oebuild/parse_env.py index b6c7cd45c3edab716c6f076a5773d86e083077e5..16e479849dd0a3cfacb90c80962235b1fa40f634 100644 --- a/src/oebuild/parse_env.py +++ b/src/oebuild/parse_env.py @@ -23,8 +23,6 @@ class EnvContainer: ''' short_id: Optional[str] - volumns: list - @dataclass class Env: ''' @@ -60,35 +58,9 @@ class ParseEnv: if "container" in data: env_container = data['container'] self.env.container = EnvContainer( - short_id=env_container['short_id'], - volumns=env_container['volumns'] + short_id=env_container['short_id'] ) - def is_same_container(self, data: EnvContainer): - ''' - judge if container same with container in env.yaml - ''' - if data.volumns is None: - raise ValueError("the key volumns is lack") - - if self.env is None: - return False - - if self.env.container is None: - return False - - if len(self.env.container.volumns) != len(data.volumns): - return False - - a_gather = set(self.env.container.volumns) - b_gather = set(data.volumns) - - c_gather = a_gather.symmetric_difference(b_gather) - if len(c_gather) != 0: - return False - - return True - def set_env_container(self, env_container: EnvContainer): ''' set ParseEnv's container object @@ -103,8 +75,7 @@ class ParseEnv: if self.env.container is not None: container = self.env.container data['container'] = { - 'short_id': container.short_id, - 'volumns': container.volumns + 'short_id': container.short_id } oebuild_util.write_yaml(pathlib.Path(self.env_dir), data=data) diff --git a/src/oebuild/util.py b/src/oebuild/util.py index a32b61c33573fd2b7a6f7bc3569dd6208901e028..c4c8ed932c1883356619d3112d6a19752caacd07 100644 --- a/src/oebuild/util.py +++ b/src/oebuild/util.py @@ -26,6 +26,13 @@ CONFIG_YAML = 'config.yaml' UPGRADE_YAML = 'upgrade.yaml' COMPILE_YAML = 'compile.yaml.sample' 
BASH_END_FLAG = " ###!!!###" +CONTAINER_USER = "openeuler" +CONTAINER_BUILD = '/home/openeuler/build' +DEFAULT_DOCKER = "swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-container:latest" +CONTAINER_SRC = '/usr1/openeuler/src' +CONTAINER_USER = "openeuler" +SDK_ABSOLATE_PATH = "/opt/buildtools/nativesdk/environment-setup-x86_64-pokysdk-linux" + def read_yaml(yaml_dir : pathlib.Path): ''' diff --git a/src/oebuild/version.py b/src/oebuild/version.py index d4a860cd2b0025039613695326ca12968ac06b8b..12b8e9b56a9f5617a0105e89cefe3a7e5f1d7590 100644 --- a/src/oebuild/version.py +++ b/src/oebuild/version.py @@ -10,4 +10,4 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. ''' -__version__ = '0.0.37' +__version__ = '0.0.38'
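
A minimal sketch (illustrative only, not part of the patch) of the container-reuse flow this change settles on: the build directory's .env now records just the container short_id, and in place of the removed is_same_container() volume comparison, oebuild recreates the container only when the recorded one can no longer be found. It assumes the oebuild package is importable and a docker daemon is reachable; the image and volume values below are placeholders taken from the defaults in this diff.

    from oebuild.docker_proxy import DockerProxy
    from oebuild.parse_env import ParseEnv, EnvContainer

    client = DockerProxy()
    env = ParseEnv(env_dir='.env')   # .env lives in the build directory
    docker_image = "swr.cn-north-4.myhuaweicloud.com/openeuler-embedded/openeuler-container:latest"
    volumns = ["/dev/net/tun:/dev/net/tun"]   # same spelling as the codebase

    if env.container is None \
            or env.container.short_id is None \
            or not client.is_container_exists(env.container.short_id):
        # no usable record: start a fresh container and remember only its short_id
        container = client.container_run_simple(image=docker_image, volumes=volumns)
        env.set_env_container(EnvContainer(container.short_id))
        env.export_env()   # rewrites .env as {'container': {'short_id': ...}}

One consequence of dropping the volume comparison is that an existing container is reused even if the desired mounts have changed, so removing .env (or the container itself) is now the way to force recreation.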