From b006e7b05c3abe51651c85f3b5a3cd8a5ef27a4a Mon Sep 17 00:00:00 2001 From: wangyueliang Date: Thu, 11 Jul 2024 17:09:57 +0800 Subject: [PATCH] sync some changes that NestOS don't care about from upstream v0.16.0 --- src/cmd-buildextend-virtualbox | 1 + src/cmd-generate-release-meta | 31 ++- src/cmd-koji-upload | 197 ++++++++++++++++-- src/cmd-meta | 4 +- src/cmd-oc-adm-release | 50 +---- src/cmd-powervs-replicate | 1 + src/cmd-sign | 9 + src/cmd-test-coreos-installer | 2 +- src/cmd-upload-oscontainer | 135 +----------- ...pload-oscontainer-deprecated-legacy-format | 134 ++++++++++++ src/cosalib/fedora_messaging_request.py | 19 +- src/cosalib/ova.py | 21 +- src/image-default.yaml | 13 +- src/virtualbox-template.xml | 127 +++++++++++ src/vmware-template.xml | 8 +- 15 files changed, 534 insertions(+), 218 deletions(-) create mode 120000 src/cmd-buildextend-virtualbox create mode 120000 src/cmd-powervs-replicate mode change 100755 => 120000 src/cmd-upload-oscontainer create mode 100755 src/cmd-upload-oscontainer-deprecated-legacy-format create mode 100644 src/virtualbox-template.xml diff --git a/src/cmd-buildextend-virtualbox b/src/cmd-buildextend-virtualbox new file mode 120000 index 00000000..c0388001 --- /dev/null +++ b/src/cmd-buildextend-virtualbox @@ -0,0 +1 @@ +cmd-artifact-disk \ No newline at end of file diff --git a/src/cmd-generate-release-meta b/src/cmd-generate-release-meta index 5466c678..5f8a78ea 100755 --- a/src/cmd-generate-release-meta +++ b/src/cmd-generate-release-meta @@ -27,11 +27,7 @@ def ensure_dup(inp, out, inp_key, out_key): def url_builder(stream, version, arch, path): - # This is a bug to be fixed with later work on https://github.com/openshift/os/issues/477 - if args.distro == 'rhcos': - return f"{args.stream_baseurl}/{stream}/{version}/{arch}/{path}" - else: - return f"{args.stream_baseurl}/{stream}/builds/{version}/{arch}/{path}" + return f"{args.stream_baseurl}/{stream}/builds/{version}/{arch}/{path}" def get_extension(path, modifier, 
arch): @@ -95,6 +91,18 @@ if os.path.exists(args.output) and os.stat(args.output).st_size > 0: print(f"Using existing release file {args.output}") +def get_floating_tag(rel, tags): + found = "" + for tag in tags: + if rel not in tag: + if found != "": + raise f"multiple floating tags within: {tags}" + found = tag + if found == "": + raise f"failed to find floating tag within: {tags}" + return found + + # Append the coreos-assembler build json `input_` to `out`, the target release stream. def append_build(out, input_): arch = input_.get("coreos-assembler.basearch") @@ -124,7 +132,7 @@ def append_build(out, input_): # build the architectures dict arch_dict = {"media": {}} ensure_dup(input_, arch_dict, "ostree-commit", "commit") - platforms = ["aliyun", "applehv", "aws", "azure", "azurestack", "digitalocean", "exoscale", "gcp", "hyperv", "ibmcloud", "metal", "nutanix", "openstack", "powervs", "qemu", "vmware", "vultr"] + platforms = ["aliyun", "applehv", "aws", "azure", "azurestack", "digitalocean", "exoscale", "gcp", "hyperv", "ibmcloud", "kubevirt", "metal", "nutanix", "openstack", "powervs", "qemu", "virtualbox", "vmware", "vultr", "qemu-secex"] for platform in platforms: if input_.get("images", {}).get(platform, None) is not None: print(f" - {platform}") @@ -171,6 +179,17 @@ def append_build(out, input_): # remove the url as we haven't decided to expose that information publicly yet arch_dict["media"]["gcp"]["image"].pop("url") + # KubeVirt specific additions: https://github.com/coreos/stream-metadata-go/pull/41 + if input_.get("kubevirt", None) is not None: + arch_dict["media"].setdefault("kubevirt", {}).setdefault("image", {}) + # The `image` field uses a floating tag and the `digest-ref` field uses + # a digest pullspec. See: https://github.com/coreos/stream-metadata-go/pull/46. 
+ tag = get_floating_tag(input_["buildid"], input_["kubevirt"]["tags"]) + arch_dict["media"]["kubevirt"]["image"] = { + "image": input_["kubevirt"]["image"] + f":{tag}", + "digest-ref": input_["kubevirt"]["image"] + "@" + input_["kubevirt"]["digest"], + } + # Azure: https://github.com/coreos/stream-metadata-go/issues/13 inputaz = input_.get("azure") if inputaz is not None: diff --git a/src/cmd-koji-upload b/src/cmd-koji-upload index 845e77d9..610ca093 100755 --- a/src/cmd-koji-upload +++ b/src/cmd-koji-upload @@ -1,4 +1,5 @@ #!/usr/bin/python3 -u +# pylint: disable=E1136 """ cmd-koji-upload performs the required steps to make COSA a Koji Content Generator. When running this in an automated fashion, you will need a Kerberos @@ -192,6 +193,36 @@ class Build(_Build): return fname, True + def get_rpm_list(self, host=None): + """ + Translate commitmeta.json/HOST OS rpms into a json list + Returns the json rpms list + """ + components = [] + if host is None: + rpms = self.commit["rpmostree.rpmdb.pkglist"] + else: + host_rpms = subprocess.check_output('rpm -qa --qf="%{NAME}:%{EPOCH}:%{RELEASE}:%{VERSION}:%{ARCH}:%{SIGMD5}:%{SIGPGP} \n"', shell=True).strip() + rpms = (host_rpms.decode('utf-8')).split("\n") + + for rpm in rpms: + if host is None: + name, epoch, version, release, arch = rpm + sigmd5, sigpgp, epoch = None, None, None + else: + name, epoch, release, version, arch, sigmd5, sigpgp = rpm.split(':') + entry = { + "type": "rpm", + "name": name, + "version": version, + "release": release, + "epoch": epoch, + "arch": arch, + "sigmd5": sigmd5, + "sigpgp": sigpgp} + components.append(entry) + return components + def mutate_for_koji(self, fname): """ Koji is _so_ pendantic about the naming of files and their extensions, @@ -375,6 +406,71 @@ class _KojiBase(): return session +class Search(_KojiBase): + """ + Search for builds + """ + + def __init__(self, profile): + """ + Creates a new instance for search. 
+ + :param profile: Koji profile name in /etc/koji.conf.d + :type str + """ + super().__init__(profile) + + def get_state(self, nvr): + """ + Return the build state. + :param nvr: The nvr name from Brew + + For more about build state see: + https://pagure.io/koji/blob/master/f/www/kojiweb/builds.chtml#_27 + https://pagure.io/koji/blob/master/f/tests/test_cli/test_import.py#_73 + """ + + info = self.session.getBuild(nvr, strict=False) + if (info): + return (info['state']) + + # Don't return anything else if no build was found. + # Since the build states are describe as numbers from 0 + # to 4, let's get an empty return + return "" + + def check_tag(self, nvr, tag): + """ + Check if the build contains the tag + :param nvr: The nvr name from Brew + :param tag: The tag to be checked + """ + + tags = self.session.listTags(build=nvr) + for build_tag in tags: + if tag == build_tag['name']: + return True + + return False + + def ensure_tag(self, nvr, tag): + """ + Ensure if the build contains the tag + :param nvr: The nvr name from Brew + :param tag: The tag to be checked + """ + + if not self.check_tag(nvr, tag): + log.info('Build %s was not tagged. Adding tag: %s' % (nvr, tag)) + task_id = self.session.tagBuild(tag, nvr) + task_result = klib.watch_tasks(self.session, [task_id], quiet=True, poll_interval=15) + if task_result != 0: + raise Exception('failed to tag builds') + log.info('Tag %s successfully added' % (tag)) + else: + log.info('Tag %s already exists' % (tag)) + + class Reserve(_KojiBase): """ Reserves a place in Koji for later archival. 
@@ -398,12 +494,21 @@ class Reserve(_KojiBase): """ log.info("Reserving a unique koji id") - release = datetime.datetime.utcnow().strftime("%H%M%S") - + # The koji/brew NVR is constructed like so: + # Name = "rhcos-$arch", like `rhcos-x86_64` + # Version = Everything before `-` in RHCOS version + # Release = Everything after `-` in RHCOS version + # + # Example: RHCOS Build ID: 414.92.202307170903-0 for x86_64 + # Name = rhcos-x86_64 + # Version = 414.92.202307170903 + # Release = 0 + # NVR = rhcos-x86_64-414.92.202307170903-0 + version, release = build.build_id.split('-') data = { "name": f"{build.build_name}-{build.basearch}", "release": release, - "version": f"{build.build_id.replace('-', '.')}", + "version": version, "cg": "coreos-assembler", } @@ -464,10 +569,13 @@ class Upload(_KojiBase): self._session = None self._tag = tag self._image_files = None - self._release = None self._reserve_id_file = None self._retry_attempts = 2 self._uploaded = False + self._s3 = None + self._s3_bucket = None + self._s3_key = None + self._s3_url = None if self._tag is None: raise Exception("build tag must be set") @@ -524,7 +632,8 @@ class Upload(_KojiBase): "filename": obj['upload_path'], "filesize": obj["size"], "type": ext, - "extra": {"image": {"arch": arch}} + "extra": {"image": {"arch": arch}}, + "components": "" } if etype not in ("image", "source"): @@ -546,6 +655,8 @@ class Upload(_KojiBase): for _, value in (self.build).get_artifacts(): file_output = self.get_file_meta(value) if file_output is not None: + if "commitmeta.json" in value['upload_path']: + file_output["components"] = self.build.get_rpm_list() outputs.append(file_output) self._image_files = outputs return self._image_files @@ -560,7 +671,6 @@ class Upload(_KojiBase): now = datetime.datetime.utcnow() stamp = now.strftime("%s") - self.release = now.strftime("%H%M%S") """ Koji has a couple of checks to ensure the reservation data (build_Id, release, name @@ -591,10 +701,29 @@ class Upload(_KojiBase): except: 
pass + if self._s3 is not None: + self.build.meta['s3'] = { + 'bucket': self._s3_bucket, + 'key': self._s3_key, + 'public-url': self._s3_url + } + self.build.meta_write() + source = self.build.get_meta_key( "meta", self.build.ckey("container-config-git")) log.debug(f"Preparing manifest for {(len(self.image_files))} files") + # The koji/brew NVR is constructed like so: + # Name = "rhcos-$arch", like `rhcos-x86_64` + # Version = Everything before `-` in RHCOS version + # Release = Everything after `-` in RHCOS version + # + # Example: RHCOS Build ID: 414.92.202307170903-0 for x86_64 + # Name = rhcos-x86_64 + # Version = 414.92.202307170903 + # Release = 0 + # NVR = rhcos-x86_64-414.92.202307170903-0 + version, release = self.build.build_id.split('-') self._manifest = { "metadata_version": 0, "build": { @@ -608,14 +737,11 @@ class Upload(_KojiBase): } }, "name": f"{self.build.build_name}-{self.build.basearch}", - "release": self._release, + "release": release, "owner": self._owner, "source": source['origin'], "start_time": stamp, - # RHCOS wants to be semver-compatible, but Koji doesn't - # accept `-`. 
See - # https://github.com/openshift/oc/pull/209#issuecomment-564876535 - "version": f"{self.build.build_id.replace('-', '.')}" + "version": version }, "buildroots": [{ "id": 1, @@ -634,7 +760,7 @@ class Upload(_KojiBase): "arch": self.build.basearch, "name": "coreos-assembler" }, - "components": "", + "components": self.build.get_rpm_list('host'), "extra": { "coreos-assembler": { "build_id": 1, @@ -775,6 +901,11 @@ Examples: --keytab keytab \ --owner me@FEDORA.COM \ --profile koji + $ cmd-koji-upload search \ + --nvr nvr \ + --keytab keytab \ + --owner me@FEDORA.COM \ + --profile koji Environment variables are supported: - KOJI_USERNAME will set the owner @@ -794,7 +925,7 @@ Environment variables are supported: parent_parser.add_argument("--buildroot", default="builds", help="Build diretory") parent_parser.add_argument("--dump", default=False, action='store_true', - help="Dump the manfiest and exit") + help="Dump the manifest and exit") parent_parser.add_argument("--no-upload", default=False, action='store_true', help="Do not upload, just parse the build") parent_parser.add_argument("--arch", default=get_basearch(), @@ -821,6 +952,11 @@ Environment variables are supported: upload_cmd = sub_commands.add_parser( "upload", help="Uploads to koji", parents=[parent_parser], add_help=False) + search_cmd = sub_commands.add_parser( + "search", help="Search for a build", parents=[parent_parser], add_help=False) + + ensure_cmd = sub_commands.add_parser( + "ensure-tag", help="Ensure the build tag is correct", parents=[parent_parser], add_help=False) sub_commands.add_parser( "reserve-id", help="Reserves a koji id", parents=[parent_parser], add_help=False) @@ -841,6 +977,30 @@ Environment variables are supported: '--reserve-id-state-file', required=False, help='Uses the path for a reservation file previous created') + upload_cmd.add_argument( + '--s3-bucket', required=False, + help='Store bucket information in meta.json') + + upload_cmd.add_argument( + '--s3-key', 
required=False, + help='Store key information in meta.json') + + upload_cmd.add_argument( + '--s3-url', required=False, + help='Store url information in meta.json') + + search_cmd.add_argument( + '--nvr', required=True, + help='NVR to look for') + + ensure_cmd.add_argument( + '--nvr', required=True, + help='NVR to look for') + + ensure_cmd.add_argument( + '--tag', required=True, + help='Ensure the tag if the build does not have it') + args, extra_args = parser.parse_known_args() set_logger(args.log_level) @@ -857,7 +1017,10 @@ Environment variables are supported: if args.auth: kinit(args.keytab, args.owner) - + if args._command == 'search': + print(Search(args.profile).get_state(args.nvr)) + if args._command == 'ensure-tag': + Search(args.profile).ensure_tag(args.nvr, args.tag) if args._command == 'upload': upload = Upload(build, args.owner, args.tag, args.profile) @@ -873,6 +1036,12 @@ Environment variables are supported: Reserve(args.profile).reserve_id(build) if args.reserve_id_state_file: upload._reserve_id_file = args.reserve_id_state_file + if args.s3_bucket or args.s3_key or args.s3_url: + upload._s3 = True + upload._s3_bucket = args.s3_bucket + upload._s3_key = args.s3_key + upload._s3_url = args.s3_url + build.build_artifacts() upload.upload() elif args._command == 'reserve-id': diff --git a/src/cmd-meta b/src/cmd-meta index 238c8abd..5b99ae64 100755 --- a/src/cmd-meta +++ b/src/cmd-meta @@ -29,7 +29,7 @@ def new_cli(): action='store_true') parser.add_argument('--schema', help='location of meta.json schema', default=os.environ.get("COSA_META_SCHEMA", - f'{COSA_PATH}/v1.json.json')) + f'{COSA_PATH}/v1.json')) parser.add_argument('--true', dest='bool', default=None, help='set a field', action='store_true') parser.add_argument('--false', dest='bool', default=None, @@ -65,7 +65,7 @@ def new_cli(): def pather(val): path = val.split('.') - if val.startswith("coreos-assembler."): + if val.startswith("coreos-assembler.") or val.startswith("fedora-coreos."): 
new_path = [f"{path[0]}.{path[1]}"] new_path.extend(path[2:]) return ".".join(new_path) diff --git a/src/cmd-oc-adm-release b/src/cmd-oc-adm-release index 3d148d01..61396e32 100755 --- a/src/cmd-oc-adm-release +++ b/src/cmd-oc-adm-release @@ -15,11 +15,7 @@ import argparse import json import logging as log import os -import shutil import sys -import tarfile -import tempfile -import urllib.request COSA_PATH = os.path.dirname(os.path.abspath(__file__)) @@ -33,7 +29,6 @@ from cosalib.cmdlib import ( os.environ["PATH"] = f"{os.getcwd()}:{COSA_PATH}:{os.environ.get('PATH')}" OCP_SERVER = "https://api.ci.openshift.org" OCP_RELEASE_STREAM = "quay.io/openshift-release-dev/ocp-release" -OCP_TOOL_MIRROR = "https://mirror.openshift.com/pub/openshift-v4/clients/oc/{ocp_ver}/linux/oc.tar.gz" log.basicConfig( format='[%(levelname)s]: %(message)s', @@ -55,32 +50,6 @@ def ocp_versions(meta): return (f"{ocp_major}.{ocp_minor}", f"{os_major}.{os_minor}") -def fetch_ocp_bin(ocp_ver): - """ - Download the specific release oc binary - """ - ret = f"{os.getcwd()}/oc-{ocp_ver}" - if os.path.exists(ret): - log.warning(f"{ret} already exists, skipping download") - return ret - - url = OCP_TOOL_MIRROR.format(ocp_ver=ocp_ver) - log.info(f"Downloading oc tool from {url}") - - oc_gz = tempfile.NamedTemporaryFile() - oc_bin = tempfile.NamedTemporaryFile() - - with urllib.request.urlopen(url) as data: - shutil.copyfileobj(data, oc_gz) - - with tarfile.open(oc_gz.name, mode='r:gz') as td: - shutil.copyfileobj(td.extractfile("oc"), oc_bin) - - shutil.copyfile(oc_bin.name, ret) - os.chmod(ret, 0o755) - return ret - - def release_stream(meta, args, ocp_ver): """ Locate the release version based on the release streams. 
@@ -111,7 +80,7 @@ if __name__ == '__main__': parser.add_argument('--build', default='latest') parser.add_argument('--schema', help='location of meta.json schema', default=os.environ.get("COSA_META_SCHEMA", - f'{COSA_PATH}/v1.json.json')) + f'{COSA_PATH}/v1.json')) parser.add_argument("--authfile", action="store", required=True, help="Pull secret") parser.add_argument("--arch", action='store', @@ -131,9 +100,7 @@ if __name__ == '__main__': parser.add_argument('--server', action="store", default=OCP_SERVER, help="server to get releases from") - parser.add_argument('--fetch-bin', action='store_true', - help="download the oc binary, overrides --oc-bin"), - parser.add_argument('--oc-bin', action="store", default="", + parser.add_argument('--oc-bin', action="store", default="oc", help="Openshift ocp binary") parser.add_argument("--dry-run", default=False, action='store_true') @@ -152,18 +119,7 @@ if __name__ == '__main__': log.info(f"Generating payload for {ocp_ver} for OS Version {os_ver}") from_release = release_stream(meta, args, ocp_ver) - oc_bin = args.oc_bin - if args.fetch_bin: - oc_bin = fetch_ocp_bin(ocp_ver) - log.info(f"Wrote {os.getcwd()}/{oc_bin}") - elif oc_bin == "": - oc_bin = shutil.which(f"oc-{ocp_ver}") - - if oc_bin is None: - raise Exception("missing ocp binary: please use --fetch-bin, --ocp-bin " - f"or add oc-{ocp_ver} to the path") - - cmd = [oc_bin, "adm", "release", "new", "-a", args.authfile] + cmd = [args.oc_bin, "adm", "release", "new", "-a", args.authfile] if ocp_ver: cmd.extend(["-n", "ocp"]) diff --git a/src/cmd-powervs-replicate b/src/cmd-powervs-replicate new file mode 120000 index 00000000..2ab672a5 --- /dev/null +++ b/src/cmd-powervs-replicate @@ -0,0 +1 @@ +./cmd-ore-wrapper \ No newline at end of file diff --git a/src/cmd-sign b/src/cmd-sign index adef48e5..92cc75fe 100755 --- a/src/cmd-sign +++ b/src/cmd-sign @@ -30,6 +30,9 @@ from gi.repository import GLib, Gio, OSTree # this is really the worst case scenario, it's usually pretty 
fast otherwise ROBOSIGNATORY_REQUEST_TIMEOUT_SEC = 60 * 60 +# https://pagure.io/fedora-infrastructure/issue/10899#comment-854645 +ROBOSIGNATORY_MESSAGE_PRIORITY = 4 + fedenv = 'prod' @@ -53,6 +56,8 @@ def parse_args(): 'RoboSignatory via fedora-messaging') robosig.add_argument("--s3", metavar='[/PREFIX]', required=True, help="bucket and prefix to S3 builds/ dir") + robosig.add_argument("--aws-config-file", metavar='CONFIG', default="", + help="Path to AWS config file") group = robosig.add_mutually_exclusive_group(required=True) group.add_argument("--ostree", help="sign commit", action='store_true') group.add_argument("--images", help="sign images", action='store_true') @@ -72,6 +77,8 @@ def parse_args(): def cmd_robosignatory(args): + if args.aws_config_file: + os.environ["AWS_CONFIG_FILE"] = args.aws_config_file s3 = boto3.client('s3') args.bucket, args.prefix = get_bucket_and_prefix(args.s3) @@ -119,6 +126,7 @@ def robosign_ostree(args, s3, build, gpgkey): request_type='ostree-sign', config=args.fedmsg_conf, request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC, + priority=ROBOSIGNATORY_MESSAGE_PRIORITY, environment=fedenv, body={ 'build_id': args.build, @@ -211,6 +219,7 @@ def robosign_images(args, s3, build, gpgkey): request_type='artifacts-sign', config=args.fedmsg_conf, request_timeout=ROBOSIGNATORY_REQUEST_TIMEOUT_SEC, + priority=ROBOSIGNATORY_MESSAGE_PRIORITY, environment=fedenv, body={ 'build_id': args.build, diff --git a/src/cmd-test-coreos-installer b/src/cmd-test-coreos-installer index 015f62e8..f558161c 100644 --- a/src/cmd-test-coreos-installer +++ b/src/cmd-test-coreos-installer @@ -1,5 +1,5 @@ #!/bin/bash -# Automate an end-to-end run of nestos-installer with the metal image, which then +# Automate an end-to-end run of coreos-installer with the metal image, which then # boots and writes a success message to a virtio-serial port, which we read on the host. 
set -euo pipefail diff --git a/src/cmd-upload-oscontainer b/src/cmd-upload-oscontainer deleted file mode 100755 index 98e90c3f..00000000 --- a/src/cmd-upload-oscontainer +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/python3 -u -# Upload an oscontainer. This is a wrapper for -# `cosa oscontainer` that just for historical reasons -# used to live downstream in the redhat-coreos pipeline. -# In the future we should just have one `cosa oscontainer` -# command. - -import argparse -import json -import yaml -import os -import shutil -import subprocess -import sys - -cosa_dir = os.path.dirname(os.path.abspath(__file__)) -sys.path.insert(0, cosa_dir) - -from cosalib import cmdlib - -parser = argparse.ArgumentParser() -parser.add_argument("--arch-tag", help="append arch name to push tag", - action='store_true') -parser.add_argument("--name", help="oscontainer name", - action='store', required=True) -parser.add_argument("--from", help="Base image", default='scratch', - dest='from_image') -parser.add_argument("--format", help="Format to use for push") -parser.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - -args = parser.parse_args() - -with open('builds/builds.json') as f: - builds = json.load(f)['builds'] -if len(builds) == 0: - cmdlib.fatal("No builds found") -latest_build = builds[0]['id'] -arch = cmdlib.get_basearch() -latest_build_path = f"builds/{latest_build}/{arch}" - -metapath = f"{latest_build_path}/meta.json" -with open(metapath) as f: - meta = json.load(f) - -# for backcompat, we auto-build extensions if they're missing -if os.path.exists('src/config/extensions.yaml'): - if 'extensions' not in meta: - cmdlib.runcmd(['coreos-assembler', 'buildextend-extensions']) - with open(metapath) as f: - meta = json.load(f) - assert 'extensions' in meta - -configdir = os.path.abspath('src/config') -oscconfigpath = f'{configdir}/oscontainer.yaml' -# XXX: fold oscontainer.yaml handling into 
oscontainer.py -configyaml = {} -if os.path.exists(oscconfigpath): - with open(oscconfigpath) as f: - configyaml = yaml.safe_load(f) - -if 'base' in configyaml: - args.from_image = configyaml['base'] - -print("Preparing to upload oscontainer for build: {}".format(latest_build)) -ostree_commit = meta['ostree-commit'] - -tmprepo = "{}/tmp/repo".format(os.getcwd()) -# if tmprepo is not a directory, but is unexpectedly a file, -# just nuke it -if not os.path.isdir(tmprepo) and os.path.exists(tmprepo): - os.remove(tmprepo) - -# if tmprepo is not a directory and not a file, recreate from -# the tarfile -if not os.path.exists(tmprepo): - os.makedirs(tmprepo, exist_ok=True) - ostree_commit_tar = meta['images']['ostree']['path'] - subprocess.check_call(['tar', '-xf', - f'{latest_build_path}/{ostree_commit_tar}', - '-C', tmprepo]) - -tmp_osreleasedir = 'tmp/usrlib-osrelease' -subprocess.check_call(['rm', '-rf', tmp_osreleasedir]) -cmdlib.runcmd(['/usr/bin/ostree', 'checkout', '--repo', tmprepo, - '--user-mode', '--subpath=/usr/lib/os-release', ostree_commit, - tmp_osreleasedir]) -display_name = None -with open(os.path.join(tmp_osreleasedir, "os-release")) as f: - display_name = subprocess.check_output(['/bin/sh', '-c', 'set -euo pipefail; . /proc/self/fd/0 && echo $NAME'], stdin=f, encoding='UTF-8').strip() -if display_name == "": - raise SystemExit(f"Failed to find NAME= in /usr/lib/os-release in commit {ostree_commit}") -shutil.rmtree(tmp_osreleasedir) - -osc_name_and_tag = f"{args.name}:{latest_build}" -if args.arch_tag: - arch = meta.get("coreos-assembler.basearch", cmdlib.get_basearch) - osc_name_and_tag = f"{args.name}:{latest_build}-{arch}" - -# TODO: Use labels for the build hash and avoid pulling the oscontainer -# every time we want to poll. 
-# TODO: Remove --from -digestfile = "tmp/oscontainer-digest" -# We need to pass the auth file from the unpriv user to the root process -cosa_argv = ['sudo', '--preserve-env=container,DISABLE_TLS_VERIFICATION,SSL_CERT_DIR,SSL_CERT_FILE,REGISTRY_AUTH_FILE,OSCONTAINER_CERT_DIR'] -authfile = os.environ.get("REGISTRY_AUTH_FILE", os.path.expanduser('~/.docker/config.json')) -if not os.path.isfile(authfile): - raise SystemExit(f"Missing {authfile}") -os.environ['REGISTRY_AUTH_FILE'] = authfile -cosa_argv.extend(['/usr/lib/coreos-assembler/oscontainer.py', '--workdir=./tmp', 'build', f"--from={args.from_image}"]) -for d in args.add_directory: - cosa_argv.append(f"--add-directory={d}") -cosa_argv.append(f"--display-name={display_name}") -if 'labeled-packages' in configyaml: - pkgs = ' '.join(configyaml['labeled-packages']) - cosa_argv.append(f"--labeled-packages={pkgs}") -if args.format is not None: - cosa_argv.append(f'--format={args.format}') -subprocess.check_call(cosa_argv + - [f'--digestfile={digestfile}', - '--push', tmprepo, - meta['ostree-commit'], - osc_name_and_tag]) - -with open(digestfile) as f: - osc_digest = f.read().strip() - -# Inject the oscontainer with SHA256 into the build metadata -meta['oscontainer'] = {'image': args.name, - 'digest': osc_digest} -metapath_new = f"{metapath}.new" -with open(metapath_new, 'w') as f: - json.dump(meta, f, sort_keys=True) -shutil.move(metapath_new, metapath) diff --git a/src/cmd-upload-oscontainer b/src/cmd-upload-oscontainer new file mode 120000 index 00000000..c494cf52 --- /dev/null +++ b/src/cmd-upload-oscontainer @@ -0,0 +1 @@ +cmd-upload-oscontainer-deprecated-legacy-format \ No newline at end of file diff --git a/src/cmd-upload-oscontainer-deprecated-legacy-format b/src/cmd-upload-oscontainer-deprecated-legacy-format new file mode 100755 index 00000000..98e90c3f --- /dev/null +++ b/src/cmd-upload-oscontainer-deprecated-legacy-format @@ -0,0 +1,134 @@ +#!/usr/bin/python3 -u +# Upload an oscontainer. 
This is a wrapper for +# `cosa oscontainer` that just for historical reasons +# used to live downstream in the redhat-coreos pipeline. +# In the future we should just have one `cosa oscontainer` +# command. + +import argparse +import json +import yaml +import os +import shutil +import subprocess +import sys + +cosa_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, cosa_dir) + +from cosalib import cmdlib + +parser = argparse.ArgumentParser() +parser.add_argument("--arch-tag", help="append arch name to push tag", + action='store_true') +parser.add_argument("--name", help="oscontainer name", + action='store', required=True) +parser.add_argument("--from", help="Base image", default='scratch', + dest='from_image') +parser.add_argument("--format", help="Format to use for push") +parser.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", + metavar='DIR', action='append', default=[]) + +args = parser.parse_args() + +with open('builds/builds.json') as f: + builds = json.load(f)['builds'] +if len(builds) == 0: + cmdlib.fatal("No builds found") +latest_build = builds[0]['id'] +arch = cmdlib.get_basearch() +latest_build_path = f"builds/{latest_build}/{arch}" + +metapath = f"{latest_build_path}/meta.json" +with open(metapath) as f: + meta = json.load(f) + +# for backcompat, we auto-build extensions if they're missing +if os.path.exists('src/config/extensions.yaml'): + if 'extensions' not in meta: + cmdlib.runcmd(['coreos-assembler', 'buildextend-extensions']) + with open(metapath) as f: + meta = json.load(f) + assert 'extensions' in meta + +configdir = os.path.abspath('src/config') +oscconfigpath = f'{configdir}/oscontainer.yaml' +# XXX: fold oscontainer.yaml handling into oscontainer.py +configyaml = {} +if os.path.exists(oscconfigpath): + with open(oscconfigpath) as f: + configyaml = yaml.safe_load(f) + +if 'base' in configyaml: + args.from_image = configyaml['base'] + +print("Preparing to upload oscontainer for build: 
{}".format(latest_build)) +ostree_commit = meta['ostree-commit'] + +tmprepo = "{}/tmp/repo".format(os.getcwd()) +# if tmprepo is not a directory, but is unexpectedly a file, +# just nuke it +if not os.path.isdir(tmprepo) and os.path.exists(tmprepo): + os.remove(tmprepo) + +# if tmprepo is not a directory and not a file, recreate from +# the tarfile +if not os.path.exists(tmprepo): + os.makedirs(tmprepo, exist_ok=True) + ostree_commit_tar = meta['images']['ostree']['path'] + subprocess.check_call(['tar', '-xf', + f'{latest_build_path}/{ostree_commit_tar}', + '-C', tmprepo]) + +tmp_osreleasedir = 'tmp/usrlib-osrelease' +subprocess.check_call(['rm', '-rf', tmp_osreleasedir]) +cmdlib.runcmd(['/usr/bin/ostree', 'checkout', '--repo', tmprepo, + '--user-mode', '--subpath=/usr/lib/os-release', ostree_commit, + tmp_osreleasedir]) +display_name = None +with open(os.path.join(tmp_osreleasedir, "os-release")) as f: + display_name = subprocess.check_output(['/bin/sh', '-c', 'set -euo pipefail; . /proc/self/fd/0 && echo $NAME'], stdin=f, encoding='UTF-8').strip() +if display_name == "": + raise SystemExit(f"Failed to find NAME= in /usr/lib/os-release in commit {ostree_commit}") +shutil.rmtree(tmp_osreleasedir) + +osc_name_and_tag = f"{args.name}:{latest_build}" +if args.arch_tag: + arch = meta.get("coreos-assembler.basearch", cmdlib.get_basearch) + osc_name_and_tag = f"{args.name}:{latest_build}-{arch}" + +# TODO: Use labels for the build hash and avoid pulling the oscontainer +# every time we want to poll. 
+# TODO: Remove --from +digestfile = "tmp/oscontainer-digest" +# We need to pass the auth file from the unpriv user to the root process +cosa_argv = ['sudo', '--preserve-env=container,DISABLE_TLS_VERIFICATION,SSL_CERT_DIR,SSL_CERT_FILE,REGISTRY_AUTH_FILE,OSCONTAINER_CERT_DIR'] +authfile = os.environ.get("REGISTRY_AUTH_FILE", os.path.expanduser('~/.docker/config.json')) +if not os.path.isfile(authfile): + raise SystemExit(f"Missing {authfile}") +os.environ['REGISTRY_AUTH_FILE'] = authfile +cosa_argv.extend(['/usr/lib/coreos-assembler/oscontainer.py', '--workdir=./tmp', 'build', f"--from={args.from_image}"]) +for d in args.add_directory: + cosa_argv.append(f"--add-directory={d}") +cosa_argv.append(f"--display-name={display_name}") +if 'labeled-packages' in configyaml: + pkgs = ' '.join(configyaml['labeled-packages']) + cosa_argv.append(f"--labeled-packages={pkgs}") +if args.format is not None: + cosa_argv.append(f'--format={args.format}') +subprocess.check_call(cosa_argv + + [f'--digestfile={digestfile}', + '--push', tmprepo, + meta['ostree-commit'], + osc_name_and_tag]) + +with open(digestfile) as f: + osc_digest = f.read().strip() + +# Inject the oscontainer with SHA256 into the build metadata +meta['oscontainer'] = {'image': args.name, + 'digest': osc_digest} +metapath_new = f"{metapath}.new" +with open(metapath_new, 'w') as f: + json.dump(meta, f, sort_keys=True) +shutil.move(metapath_new, metapath) diff --git a/src/cosalib/fedora_messaging_request.py b/src/cosalib/fedora_messaging_request.py index 80384278..9ff9741e 100644 --- a/src/cosalib/fedora_messaging_request.py +++ b/src/cosalib/fedora_messaging_request.py @@ -53,6 +53,7 @@ def send_request_and_wait_for_response(request_type, config=None, environment='prod', request_timeout=DEFAULT_REQUEST_TIMEOUT_SEC, + priority=None, body={}): assert environment in ['prod', 'stg'] assert request_type in ['ostree-sign', 'artifacts-sign', 'ostree-import'] @@ -72,7 +73,8 @@ def 
send_request_and_wait_for_response(request_type, # Send the message/request send_message(config=config, topic=get_request_topic(request_type, environment), - body={**body, 'request_id': request_id}) + body={**body, 'request_id': request_id}, + priority=priority) # Wait for the response to come back return wait_for_response(cond, request_timeout) @@ -93,7 +95,7 @@ def broadcast_fedmsg(broadcast_type, # Send the message/request send_message(config=config, topic=get_broadcast_topic(broadcast_type, environment), - body=body) + body=body, priority=None) def get_broadcast_topic(broadcast_type, environment): @@ -108,7 +110,7 @@ def get_request_finished_topic(request_type, environment): return get_request_topic(request_type, environment) + '.finished' -def send_message(config, topic, body): +def send_message(config, topic, body, priority): print(f"Sending {topic} with body {body}") # This is a bit hacky; we fork to publish the message here so that we can # load the publishing fedora-messaging config. The TL;DR is: we need auth @@ -117,17 +119,18 @@ def send_message(config, topic, body): # inherit anything by default (like the Twisted state). 
ctx = mp.get_context('spawn') p = ctx.Process(target=send_message_impl, - args=(config, topic, body)) + args=(config, topic, body, priority)) p.start() p.join() -def send_message_impl(config, topic, body): +def send_message_impl(config, topic, body, priority): if config: conf.load_config(config) - publish( - message.Message(body=body, topic=topic) - ) + msg = message.Message(body=body, topic=topic) + if priority: + msg.priority = priority + publish(msg) def wait_for_response(cond, request_timeout): diff --git a/src/cosalib/ova.py b/src/cosalib/ova.py index 7e0c914c..3fda8168 100644 --- a/src/cosalib/ova.py +++ b/src/cosalib/ova.py @@ -22,6 +22,24 @@ OVA_TEMPLATE_DIR = '/usr/lib/coreos-assembler' # To define new variants that use the QCOW2 disk image, simply, # add its definition below: VARIANTS = { + "virtualbox": { + 'template': 'virtualbox-template.xml', + "image_format": "vmdk", + "image_suffix": "ova", + "platform": "virtualbox", + "convert_options": { + '-o': 'subformat=streamOptimized' + }, + "tar_members": [ + "disk.vmdk" + ], + "tar_flags": [ + # DEFAULT_TAR_FLAGS has -S, which isn't supported by ustar + '-ch', + # Required by OVF spec + "--format=ustar" + ] + }, "vmware": { 'template': 'vmware-template.xml', "image_format": "vmdk", @@ -85,14 +103,13 @@ class OVA(QemuVariantImage): params = { 'ovf_cpu_count': cpu, 'ovf_memory_mb': memory, + 'secure_boot': secure_boot, 'vsphere_image_name': image, 'vsphere_product_name': product, 'vsphere_product_vendor_name': vendor, 'vsphere_product_version': version, 'vsphere_virtual_system_type': system_type, 'vsphere_os_type': os_type, - 'vsphere_scsi_controller_type': scsi, - 'vsphere_network_controller_type': network, 'vmdk_capacity': disk_info.get("virtual-size"), 'vmdk_size': str(vmdk_size), } diff --git a/src/image-default.yaml b/src/image-default.yaml index 5c405df9..4c2dde04 100644 --- a/src/image-default.yaml +++ b/src/image-default.yaml @@ -1,4 +1,5 @@ -# This file contains defaults for image.yaml that is used
by create_disk.sh +# This file contains defaults for image.yaml + bootfs: "ext4" rootfs: "xfs" @@ -14,9 +15,19 @@ ostree-format: oci-chunked-v1 ostree-container-inject-openshift-cvo-labels: false # True if we should use `ostree container image deploy` deploy-via-container: false + # Set this to a target container reference, e.g. ostree-unverified-registry:quay.io/example/os:latest # container-imgref: "" # Format used when generating a squashfs image. Can also be e.g. gzip or lz4 squashfs-compression: zstd +# Defaults for VMware OVA, matching historical behavior +vmware-hw-version: 13 +vmware-os-type: rhel7_64Guest +vmware-secure-boot: true + +# Defaults for AWS +aws-imdsv2-only: true +aws-volume-type: "gp3" +aws-x86-boot-mode: "uefi-preferred" diff --git a/src/virtualbox-template.xml b/src/virtualbox-template.xml new file mode 100644 index 00000000..c027c324 --- /dev/null +++ b/src/virtualbox-template.xml @@ -0,0 +1,127 @@ + + + + + + + Virtual disk information + + + + The list of logical networks + + The VM Network network + + + + A virtual machine + {vsphere_image_name} + + The kind of installed guest operating system + Fedora_64 + + + Virtual hardware requirements + + Virtual Hardware Family + 0 + fedora + virtualbox-2.2 + + + hertz * 10^6 + Number of Virtual CPUs + {ovf_cpu_count} virtual CPU(s) + 1 + 3 + {ovf_cpu_count} + + + byte * 2^20 + Memory Size + {ovf_memory_mb} MB of memory + 2 + 4 + {ovf_memory_mb} + + + 0 + sataController0 + SATA Controller + sataController0 + 3 + AHCI + 20 + + + 0 + usb + USB Controller + usb + 4 + 23 + + + 0 + disk1 + Disk Image + disk1 + /disk/vmdisk1 + 5 + 3 + 17 + + + true + Ethernet adapter on 'NAT' + NAT + Ethernet adapter on 'NAT' + 6 + E1000 + 10 + + + + Complete VirtualBox machine configuration in VirtualBox format + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Information about the installed software + {vsphere_product_name} + {vsphere_product_vendor_name} + {vsphere_product_version} + + + diff 
--git a/src/vmware-template.xml b/src/vmware-template.xml index 70d66bdf..85d99cbb 100644 --- a/src/vmware-template.xml +++ b/src/vmware-template.xml @@ -16,7 +16,7 @@ A virtual machine {vsphere_image_name} - + The kind of installed guest operating system @@ -56,7 +56,7 @@ SCSI Controller SCSI Controller 0 3 - {vsphere_scsi_controller_type} + VirtualSCSI 6 @@ -75,11 +75,13 @@ VmxNet3 ethernet adapter on "VM Network" Network adapter 1 5 - {vsphere_network_controller_type} + VmxNet3 10 + + Information about the installed software -- Gitee