From f282831b7cde835b15fa048d221de63044797733 Mon Sep 17 00:00:00 2001 From: wangyueliang Date: Thu, 11 Jul 2024 18:36:30 +0800 Subject: [PATCH] Sync toolchain virtualization related configuration changes. Note that this commit reverts some unreasonable changes in the early stages of NestOS development, such as: xref https://gitee.com/openeuler/nestos-assembler/pulls/14 [upstream] 3715f622e cmd-kola: put basic scenarios logs in different directories c11756e66 kola: Allocate more RAM for secure boot test 0cb63a86c src/kola: Add '--skip-secure-boot' option to skip Secure Boot tests 8967d3b70 cosalib: don't embed full directory paths for ZIP members f2eb092bf cosalib: fix member filename when doing our own compression bb94c5ec3 cosalib/qemuvariants: fix compression skipping for kubevirt 56f533413 Refactor buildextend compression and add zip 3f8113bc6 lib: Handle explicit `skip_compression` option in VARIANTS a90c77113 Add internal qcow2 compression for Nutanix image 240c567fa virt-install: Add `--tpm emulator` 0727e3a04 virt-install: Stop using deprecated `--os-type` 2cf91c99b libguestfish: bump libguestfs VM memory size for ppc64le to 3G cfa0a43d7 libguestfish: bump libguestfs VM memory size for ppc64le 008e04c32 only set LIBGUESTFS_HV for ppc64le on el7 8397c0d0f guestfish: enable SMP in VM for mksquashfs 088fd9175 src: add runvm.sh d8724d48f supermin-init-prelude: add timeout on `udevadm trigger` and make non-fatal de1856ccc supermin: double disk space of supermin vm and use cgroups2 9d50294ce cmdlib.sh: loopify symlink handling in runvm() 286a76280 build: Support `builds/` in workdir being a symlink outside workdir 3dc5f4ffe Drop no longer used `swtpm-wrapper` script --- src/cmd-kola | 21 +- src/cmd-virt-install | 3 +- src/cmdlib.sh | 16 +- src/cosalib/qemuvariants.py | 57 +++- src/libguestfish.sh | 10 +- src/oscontainer-deprecated-legacy-format.py | 325 +++++++++++++++++++ src/oscontainer.py | 326 +------------------- src/runvm.sh | 12 + 
src/supermin-init-prelude.sh | 7 +- src/swtpm-wrapper | 11 - 10 files changed, 423 insertions(+), 365 deletions(-) create mode 100755 src/oscontainer-deprecated-legacy-format.py mode change 100755 => 120000 src/oscontainer.py create mode 100755 src/runvm.sh delete mode 100755 src/swtpm-wrapper diff --git a/src/cmd-kola b/src/cmd-kola index fc668552..2fe2a20d 100755 --- a/src/cmd-kola +++ b/src/cmd-kola @@ -10,7 +10,8 @@ import sys # Just test these boot to start with. In the future we should at least # do ostree upgrades with uefi etc. But we don't really need the *full* # suite...if podman somehow broke with nvme or uefi I'd be amazed and impressed. -BASIC_SCENARIOS = ["nvme=true", "firmware=uefi", "firmware=uefi-secure"] +BASIC_SCENARIOS = ["nvme=true", "firmware=uefi"] +BASIC_SCENARIOS_SECURE_BOOT = ["firmware=uefi-secure"] arch = platform.machine() cosa_dir = os.path.dirname(os.path.abspath(__file__)) @@ -25,6 +26,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--build", help="Build ID") parser.add_argument("--basic-qemu-scenarios", help="Run the basic test across uefi-secure,nvme etc.", action='store_true') parser.add_argument("--output-dir", help="Output directory") +parser.add_argument("--skip-secure-boot", help="Use with '--basic-qemu-scenarios' to skip the Secure Boot tests", action='store_true') parser.add_argument("--upgrades", help="Run upgrade tests", action='store_true') parser.add_argument("subargs", help="Remaining arguments for kola", nargs='*', default=[]) @@ -52,24 +54,38 @@ if os.getuid() != 0 and len(platformargs) == 0: if args.build is not None: kolaargs.extend(['--build', args.build]) outputdir = args.output_dir or default_output_dir -kolaargs.extend(['--output-dir', outputdir]) subargs = args.subargs or [default_cmd] kolaargs.extend(subargs) kolaargs.extend(unknown_args) if args.basic_qemu_scenarios: if arch == "x86_64": + os.mkdir(outputdir) # Create the toplevel output dir for scenario in BASIC_SCENARIOS: + 
kolaargs.extend(['--output-dir', + os.path.join(outputdir, scenario.replace('=', '-'))]) subargs = kolaargs + ['--qemu-' + scenario, 'basic'] print(subprocess.list2cmdline(subargs), flush=True) subprocess.check_call(subargs) + if not args.skip_secure_boot: + for scenario in BASIC_SCENARIOS_SECURE_BOOT: + kolaargs.extend(['--output-dir', + os.path.join(outputdir, scenario.replace('=', '-'))]) + # See https://issues.redhat.com/browse/COS-2000 - there's + # some bug with shim/grub2 that fails with secure boot on < ~1300MiB of RAM. + # But we're not going to block on that; real world OCP worker nodes are at least 16GiB etc. + subargs = kolaargs + ['--qemu-' + scenario, 'basic'] + ["--qemu-memory", "1536"] + print(subprocess.list2cmdline(subargs), flush=True) + subprocess.check_call(subargs) else: # Basic qemu scenarios using nvme and uefi # are not supported on multi-arch + kolaargs.extend(['--output-dir', outputdir]) subargs = kolaargs + ['basic'] print(subprocess.list2cmdline(subargs), flush=True) subprocess.check_call(subargs) elif args.upgrades: + kolaargs.extend(['--output-dir', outputdir]) if '--qemu-image-dir' not in unknown_args: os.makedirs('tmp/kola-qemu-cache', exist_ok=True) kolaargs.extend(['--qemu-image-dir', 'tmp/kola-qemu-cache']) @@ -77,6 +93,7 @@ elif args.upgrades: print(subprocess.list2cmdline(kolaargs), flush=True) os.execvp('kola', kolaargs) else: + kolaargs.extend(['--output-dir', outputdir]) # flush before exec; see https://docs.python.org/3.7/library/os.html#os.execvpe print(subprocess.list2cmdline(kolaargs), flush=True) os.execvp('kola', kolaargs) diff --git a/src/cmd-virt-install b/src/cmd-virt-install index 7c95e8c9..231a9ea5 100755 --- a/src/cmd-virt-install +++ b/src/cmd-virt-install @@ -124,7 +124,8 @@ domname = f"{args.name}-{args.instid}" qemu_args = " ".join(['-fw_cfg', f'name=opt/com.coreos/config,file={poolpath}/{ignvol}']) basevinstall_args = ['virt-install', f"--connect={args.connect}", '--import', 
f'--disk=source.pool={args.pool},source.volume={volname}', - f'--name={domname}', '--os-type=linux', '--os-variant=rhel8-unknown', + '--tpm', 'emulator', + f'--name={domname}', '--os-variant=rhel8-unknown', f'--qemu-commandline={qemu_args}', '--noautoconsole'] cmdlib.runcmd(basevinstall_args + vinstall_args) diff --git a/src/cmdlib.sh b/src/cmdlib.sh index a6adc2f0..9338042e 100755 --- a/src/cmdlib.sh +++ b/src/cmdlib.sh @@ -645,7 +645,7 @@ EOF chmod a+x "${vmpreparedir}"/init (cd "${vmpreparedir}" && tar -czf init.tar.gz --remove-files init) # put the supermin output in a separate file since it's noisy - if ! supermin --build "${vmpreparedir}" --size 5G -f ext2 -o "${vmbuilddir}" \ + if ! supermin --build "${vmpreparedir}" --size 10G -f ext2 -o "${vmbuilddir}" \ &> "${tmp_builddir}/supermin.out"; then cat "${tmp_builddir}/supermin.out" fatal "Failed to run: supermin --build" @@ -681,11 +681,15 @@ EOF -append "root=/dev/vda console=${DEFAULT_TERMINAL} selinux=1 enforcing=0 autorelabel=1" \ ) - # support local dev cases where src/config is a symlink - if [ -L "${workdir}/src/config" ]; then - # qemu follows symlinks - base_qemu_args+=("-virtfs" 'local,id=source,path='"${workdir}"'/src/config,security_model=none,mount_tag=source') - fi + # support local dev cases where src/config is a symlink. Note if you change or extend to this set, + # you also need to update supermin-init-prelude.sh to mount it inside the VM. + for maybe_symlink in "${workdir}"/{src/config,src/yumrepos}; do + if [ -L "${maybe_symlink}" ]; then + local bn + bn=$(basename "${maybe_symlink}") + kola_args+=("--bind-ro" "${maybe_symlink},/cosa/src/${bn}") + fi + done if [ -z "${RUNVM_SHELL:-}" ]; then if ! 
"${kola_args[@]}" -- "${base_qemu_args[@]}" \ diff --git a/src/cosalib/qemuvariants.py b/src/cosalib/qemuvariants.py index 0f5520c1..99ae0973 100644 --- a/src/cosalib/qemuvariants.py +++ b/src/cosalib/qemuvariants.py @@ -78,14 +78,14 @@ VARIANTS = { "image_format": "qcow2", "image_suffix": "qcow2.gz", "platform": "digitalocean", - "gzip": True + "compression": "gzip" }, "gcp": { # See https://cloud.google.com/compute/docs/import/import-existing-image#requirements_for_the_image_file "image_format": "raw", "platform": "gcp", "image_suffix": "tar.gz", - "gzip": True, + "compression": "gzip", "convert_options": { '-o': 'preallocation=off' }, @@ -116,12 +116,16 @@ VARIANTS = { "nutanix": { "image_format": "qcow2", "platform": "nutanix", + "compression": "skip", + "convert_options": { + '-c': None + } }, "vmware_vmdk": { "image_format": "vmdk", "image_suffix": "vmdk", "platform": "vmware", - "convert_options": { + "convert_options": { '-o': 'adapter_type=lsilogic,subformat=streamOptimized,compat6' } }, @@ -186,7 +190,7 @@ class QemuVariantImage(_Build): self.compress = kwargs.get("compress", False) self.tar_members = kwargs.pop("tar_members", None) self.tar_flags = kwargs.pop("tar_flags", [DEFAULT_TAR_FLAGS]) - self.gzip = kwargs.pop("gzip", False) + self.compression = kwargs.pop("compression", None) self.virtual_size = kwargs.pop("virtual_size", None) self.mutate_callback_creates_final_image = False @@ -202,13 +206,13 @@ class QemuVariantImage(_Build): """ Return the path of the Qemu QCOW2 image from the meta-data """ - qemu_meta = self.meta.get_artifact_meta("metal", unmerged=True) + qemu_meta = self.meta.get_artifact_meta("qemu", unmerged=True) qimage = os.path.join( self.build_dir, - qemu_meta.get('images', {}).get('metal', {}).get('path', None) + qemu_meta.get('images', {}).get('qemu', {}).get('path', None) ) if not qimage: - raise ImageError("metal image has not be built yet") + raise ImageError("qemu image has not be built yet") elif not os.path.exists(qimage): 
raise ImageError(f"{qimage} does not exist") return qimage @@ -230,8 +234,8 @@ class QemuVariantImage(_Build): return None def set_platform(self): - runcmd(['/usr/lib/coreos-assembler/gf-platformid', - self.image_qemu, self.tmp_image, self.platform]) + runcmd(['/usr/lib/coreos-assembler/gf-set-platform', + self.image_qemu, self.tmp_image, self.platform]) def mutate_image(self): """ @@ -264,10 +268,12 @@ class QemuVariantImage(_Build): self.tmp_image, self.virtual_size] runcmd(resize_cmd) - cmd = ['qemu-img', 'convert', '-f', 'raw', '-O', + cmd = ['qemu-img', 'convert', '-f', 'qcow2', '-O', self.image_format, self.tmp_image] for k, v in self.convert_options.items(): - cmd.extend([k, v]) + cmd.extend([k]) + if v is not None: + cmd.extend([v]) cmd.extend([work_img]) runcmd(cmd) @@ -305,19 +311,36 @@ class QemuVariantImage(_Build): log.info(f"Moving {work_img} to {final_img}") shutil.move(work_img, final_img) - if self.gzip: + if self.compression == "skip": + meta_patch.update({ + 'skip-compression': True + }) + elif self.compression is not None: sha256 = sha256sum_file(final_img) size = os.stat(final_img).st_size - temp_path = f"{final_img}.tmp" - with open(temp_path, "wb") as fh: - runcmd(['gzip', '-9c', final_img], stdout=fh) - shutil.move(temp_path, final_img) + + # gzip and zip both embed the input filename in the output + # archive. For gzip this is harmless, since gunzip ignores it + # unless -N is specified, but zip is a container format (like + # tar) so we need to care. Strip the .gz or .zip filename + # extension from the uncompressed file before compressing. 
+ uncompressed_path = os.path.splitext(final_img)[0] + os.rename(final_img, uncompressed_path) + match self.compression: + case "gzip": + rc = ['gzip', '-9c', uncompressed_path] + case "zip": + rc = ['zip', '-9j', "-", uncompressed_path] + case _: + raise ImageError(f"unsupported compression type: {self.compression}") + with open(final_img, "wb") as fh: + runcmd(rc, stdout=fh) + os.unlink(uncompressed_path) meta_patch.update({ 'skip-compression': True, 'uncompressed-sha256': sha256, 'uncompressed-size': size, }) - return meta_patch def _build_artifacts(self, *args, **kwargs): diff --git a/src/libguestfish.sh b/src/libguestfish.sh index 506ce70c..15fa6665 100755 --- a/src/libguestfish.sh +++ b/src/libguestfish.sh @@ -19,6 +19,12 @@ if [ "$arch" = "ppc64le" ] ; then fi fi +# Hack to give ppc64le more memory inside the libguestfs VM. +# The compiled in default I see when running `guestfish get-memsize` +# is 1280. We need this because we are seeing issues from +# buildextend-live when running gf-mksquashfs. 
+[ "$arch" = "ppc64le" ] && export LIBGUESTFS_MEMSIZE=3072 + # http://libguestfs.org/guestfish.1.html#using-remote-control-robustly-from-shell-scripts GUESTFISH_PID= coreos_gf_launch() { @@ -26,7 +32,7 @@ coreos_gf_launch() { return fi - eval "$(guestfish --listen --format=raw -a "$@")" + eval "$(guestfish --listen -a "$@")" if [ -z "$GUESTFISH_PID" ]; then fatal "guestfish didn't start up, see error messages above" fi @@ -47,6 +53,8 @@ coreos_gf_run() { return fi coreos_gf_launch "$@" + # Allow mksquashfs to parallelize + coreos_gf set-smp "$(kola ncpu)" coreos_gf run GUESTFISH_RUNNING=1 } diff --git a/src/oscontainer-deprecated-legacy-format.py b/src/oscontainer-deprecated-legacy-format.py new file mode 100755 index 00000000..bcb56ee0 --- /dev/null +++ b/src/oscontainer-deprecated-legacy-format.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python3 +# NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output +# +# An "oscontainer" is an ostree (archive) repository stuck inside +# a Docker/OCI container at /srv/repo. For more information, +# see https://github.com/openshift/pivot +# +# This command manipulates those images. 
+ +import gi + +gi.require_version('OSTree', '1.0') +gi.require_version('RpmOstree', '1.0') + +from gi.repository import GLib, Gio, OSTree, RpmOstree + +import argparse +import json +import os +import shutil +import subprocess +from cosalib import cmdlib +from cosalib.buildah import ( + buildah_base_args +) + +OSCONTAINER_COMMIT_LABEL = 'com.coreos.ostree-commit' + + +def run_get_json(args): + return json.loads(subprocess.check_output(args)) + + +def run_get_string(args): + return subprocess.check_output(args, encoding='UTF-8').strip() + + +def find_commit_from_oscontainer(repo): + """Given an ostree repo, find exactly one commit object in it""" + o = subprocess.check_output(['find', repo + '/objects', '-name', '*.commit'], encoding='UTF-8').strip().split('\n') + if len(o) > 1: + raise SystemExit(f"Multiple commit objects found in {repo}") + d, n = os.path.split(o[0]) + return os.path.basename(d) + n.split('.')[0] + + +# Given a container reference, pull the latest version, then extract the ostree +# repo a new directory dest/repo. +def oscontainer_extract(containers_storage, tmpdir, src, dest, + tls_verify=True, ref=None, cert_dir="", + authfile=""): + dest = os.path.realpath(dest) + subprocess.check_call(["ostree", "--repo=" + dest, "refs"]) + + # FIXME: Today we use skopeo in a hacky way for this. What we + # really want is the equivalent of `oc image extract` as part of + # podman or skopeo. + cmd = ['skopeo'] + # See similar message in oscontainer_build. 
+ if tmpdir is not None: + os.environ['TMPDIR'] = tmpdir + + if not tls_verify: + cmd.append('--tls-verify=false') + + if authfile != "": + cmd.append("--authfile={}".format(authfile)) + if cert_dir != "": + cmd.append("--cert-dir={}".format(cert_dir)) + tmp_tarball = tmpdir + '/container.tar' + cmd += ['copy', "docker://" + src, 'docker-archive://' + tmp_tarball] + cmdlib.runcmd(cmd) + cmdlib.runcmd(['tar', 'xf', tmp_tarball], cwd=tmpdir) + os.unlink(tmp_tarball) + # This is a brutal hack to extract all the layers; we don't even bother with ordering + # because we know we're not removing anything in higher layers. + subprocess.check_call(['find', '-name', '*.tar', '-exec', 'tar', 'xUf', '{}', ';'], cwd=tmpdir) + # Some files/directories aren't writable, and this will cause permission errors + subprocess.check_call(['find', '!', '-perm', '-u+w', '-exec', 'chmod', 'u+w', '{}', ';'], cwd=tmpdir) + + repo = tmpdir + '/srv/repo' + commit = find_commit_from_oscontainer(repo) + print(f"commit: {commit}") + cmdlib.runcmd(["ostree", "--repo=" + dest, "pull-local", repo, commit]) + if ref is not None: + cmdlib.runcmd([ + "ostree", "--repo=" + dest, "refs", '--create=' + ref, commit]) + + +# Given an OSTree repository at src (and exactly one ref) generate an +# oscontainer with it. 
+def oscontainer_build(containers_storage, tmpdir, src, ref, image_name_and_tag, + base_image, push=False, tls_verify=True, pushformat=None, + add_directories=[], cert_dir="", authfile="", digestfile=None, + display_name=None, labeled_pkgs=[]): + r = OSTree.Repo.new(Gio.File.new_for_path(src)) + r.open(None) + + [_, rev] = r.resolve_rev(ref, True) + if ref != rev: + print("Resolved {} = {}".format(ref, rev)) + [_, ostree_commit, _] = r.load_commit(rev) + ostree_commitmeta = ostree_commit.get_child_value(0) + versionv = ostree_commitmeta.lookup_value( + "version", GLib.VariantType.new("s")) + if versionv: + ostree_version = versionv.get_string() + else: + ostree_version = None + + buildah_base_argv = buildah_base_args(containers_storage) + + # In general, we just stick with the default tmpdir set up. But if a + # workdir is provided, then we want to be sure that all the heavy I/O work + # that happens stays in there since e.g. we might be inside a tiny supermin + # appliance. + if tmpdir is not None: + os.environ['TMPDIR'] = tmpdir + + bid = run_get_string(buildah_base_argv + ['from', base_image]) + mnt = run_get_string(buildah_base_argv + ['mount', bid]) + try: + dest_repo = os.path.join(mnt, 'srv/repo') + subprocess.check_call(['mkdir', '-p', dest_repo]) + subprocess.check_call([ + "ostree", "--repo=" + dest_repo, "init", "--mode=archive"]) + # Note that oscontainers don't have refs; we also disable fsync + # because the repo will be put into a container image and the build + # process should handle its own fsync (or choose not to). 
+ print("Copying ostree commit into container: {} ...".format(rev)) + cmdlib.runcmd(["ostree", "--repo=" + dest_repo, "pull-local", "--disable-fsync", src, rev]) + + for d in add_directories: + with os.scandir(d) as it: + for entry in it: + dest = os.path.join(mnt, entry.name) + subprocess.check_call(['/usr/lib/coreos-assembler/cp-reflink', entry.path, dest]) + print(f"Copied in content from: {d}") + + # We use /noentry to trick `podman create` into not erroring out + # on a container with no cmd/entrypoint. It won't actually be run. + config = ['--entrypoint', '["/noentry"]', + '-l', OSCONTAINER_COMMIT_LABEL + '=' + rev] + if ostree_version is not None: + config += ['-l', 'version=' + ostree_version] + + base_pkgs = RpmOstree.db_query_all(r, rev, None) + for pkg in base_pkgs: + name = pkg.get_name() + if name in labeled_pkgs: + config += ['-l', f"com.coreos.rpm.{name}={pkg.get_evr()}.{pkg.get_arch()}"] + + # Generate pkglist.txt in to the oscontainer at / + pkg_list_dest = os.path.join(mnt, 'pkglist.txt') + # should already be sorted, but just re-sort to be sure + nevras = sorted([pkg.get_nevra() for pkg in base_pkgs]) + with open(pkg_list_dest, 'w') as f: + for nevra in nevras: + f.write(nevra) + f.write('\n') + + meta = {} + builddir = None + if os.path.isfile('builds/builds.json'): + with open('builds/builds.json') as fb: + builds = json.load(fb)['builds'] + latest_build = builds[0]['id'] + arch = cmdlib.get_basearch() + builddir = f"builds/{latest_build}/{arch}" + metapath = f"{builddir}/meta.json" + with open(metapath) as f: + meta = json.load(f) + rhcos_commit = meta['coreos-assembler.container-config-git']['commit'] + imagegit = meta.get('coreos-assembler.container-image-git') + if imagegit is not None: + cosa_commit = imagegit['commit'] + config += ['-l', f"com.coreos.coreos-assembler-commit={cosa_commit}"] + config += ['-l', f"com.coreos.redhat-coreos-commit={rhcos_commit}"] + + if 'extensions' in meta: + tarball = os.path.abspath(os.path.join(builddir, 
meta['extensions']['path'])) + dest_dir = os.path.join(mnt, 'extensions') + os.makedirs(dest_dir, exist_ok=True) + cmdlib.runcmd(["tar", "-xf", tarball], cwd=dest_dir) + + with open(os.path.join(dest_dir, 'extensions.json')) as f: + extensions = json.load(f) + + extensions_label = ';'.join([ext for (ext, obj) in extensions['extensions'].items() + if obj.get('kind', 'os-extension') == 'os-extension']) + config += ['-l', f"com.coreos.os-extensions={extensions_label}"] + + for pkgname in meta['extensions']['manifest']: + if pkgname in labeled_pkgs: + evra = meta['extensions']['manifest'][pkgname] + config += ['-l', f"com.coreos.rpm.{pkgname}={evra}"] + + if display_name is not None: + config += ['-l', 'io.openshift.build.version-display-names=machine-os=' + display_name, + '-l', 'io.openshift.build.versions=machine-os=' + ostree_version] + cmdlib.runcmd(buildah_base_argv + ['config'] + config + [bid]) + print("Committing container...") + iid = run_get_string(buildah_base_argv + ['commit', bid, image_name_and_tag]) + print("{} {}".format(image_name_and_tag, iid)) + finally: + subprocess.call(buildah_base_argv + ['umount', bid], stdout=subprocess.DEVNULL) + subprocess.call(buildah_base_argv + ['rm', bid], stdout=subprocess.DEVNULL) + + if push: + print("Pushing container") + podCmd = buildah_base_argv + ['push'] + if not tls_verify: + tls_arg = '--tls-verify=false' + else: + tls_arg = '--tls-verify' + podCmd.append(tls_arg) + + if authfile != "": + podCmd.append("--authfile={}".format(authfile)) + + if cert_dir != "": + podCmd.append("--cert-dir={}".format(cert_dir)) + + if digestfile is not None: + podCmd.append(f'--digestfile={digestfile}') + + if pushformat is not None: + podCmd.append(f'--format={pushformat}') + + podCmd.append(image_name_and_tag) + + cmdlib.runcmd(podCmd) + elif digestfile is not None: + inspect = run_get_json(buildah_base_argv + ['inspect', image_name_and_tag])[0] + with open(digestfile, 'w') as f: + f.write(inspect['Digest']) + + +def main(): + # 
Parse args and dispatch + parser = argparse.ArgumentParser() + parser.add_argument("--workdir", help="Temporary working directory") + parser.add_argument("--disable-tls-verify", + help="Disable TLS for pushes and pulls", + default=(True if os.environ.get("DISABLE_TLS_VERIFICATION", False) else False), + action="store_true") + parser.add_argument("--cert-dir", help="Extra certificate directories", + default=os.environ.get("OSCONTAINER_CERT_DIR", '')) + parser.add_argument("--authfile", help="Path to authentication file", + action="store", + default=os.environ.get("REGISTRY_AUTH_FILE", '')) + subparsers = parser.add_subparsers(dest='action') + parser_extract = subparsers.add_parser( + 'extract', help='Extract an oscontainer') + parser_extract.add_argument("src", help="Image reference") + parser_extract.add_argument("dest", help="Destination directory") + parser_extract.add_argument("--ref", help="Also set an ostree ref") + parser_build = subparsers.add_parser('build', help='Build an oscontainer') + parser_build.add_argument( + "--from", + help="Base image (default 'scratch')", + default='scratch') + parser_build.add_argument("src", help="OSTree repository") + parser_build.add_argument("rev", help="OSTree ref (or revision)") + parser_build.add_argument("name", help="Image name") + parser_build.add_argument("--display-name", help="Name used for an OpenShift component") + parser_build.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", + metavar='DIR', action='append', default=[]) + parser_build.add_argument("--labeled-packages", help="Packages whose NEVRAs are included as labels on the image") + # For now we forcibly override to v2s2 https://bugzilla.redhat.com/show_bug.cgi?id=2058421 + parser_build.add_argument("--format", help="Pass through push format to buildah", default="v2s2") + parser_build.add_argument( + "--digestfile", + help="Write image digest to file", + action='store', + metavar='FILE') + parser_build.add_argument( + 
"--push", + help="Push to registry", + action='store_true') + args = parser.parse_args() + + labeled_pkgs = [] + if args.labeled_packages is not None: + labeled_pkgs = args.labeled_packages.split() + + containers_storage = None + tmpdir = None + if args.workdir is not None: + containers_storage = os.path.join(args.workdir, 'containers-storage') + if os.path.exists(containers_storage): + shutil.rmtree(containers_storage) + tmpdir = os.path.join(args.workdir, 'tmp') + if os.path.exists(tmpdir): + shutil.rmtree(tmpdir) + os.makedirs(tmpdir) + + try: + if args.action == 'extract': + oscontainer_extract( + containers_storage, tmpdir, args.src, args.dest, + tls_verify=not args.disable_tls_verify, + cert_dir=args.cert_dir, + ref=args.ref, + authfile=args.authfile) + elif args.action == 'build': + oscontainer_build( + containers_storage, tmpdir, args.src, args.rev, args.name, + getattr(args, 'from'), + display_name=args.display_name, + digestfile=args.digestfile, + add_directories=args.add_directory, + push=args.push, + pushformat=args.format, + tls_verify=not args.disable_tls_verify, + cert_dir=args.cert_dir, + authfile=args.authfile, + labeled_pkgs=labeled_pkgs) + finally: + if containers_storage is not None and os.path.isdir(containers_storage): + shutil.rmtree(containers_storage) + + +if __name__ == '__main__': + main() diff --git a/src/oscontainer.py b/src/oscontainer.py deleted file mode 100755 index bcb56ee0..00000000 --- a/src/oscontainer.py +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env python3 -# NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output -# -# An "oscontainer" is an ostree (archive) repository stuck inside -# a Docker/OCI container at /srv/repo. For more information, -# see https://github.com/openshift/pivot -# -# This command manipulates those images. 
- -import gi - -gi.require_version('OSTree', '1.0') -gi.require_version('RpmOstree', '1.0') - -from gi.repository import GLib, Gio, OSTree, RpmOstree - -import argparse -import json -import os -import shutil -import subprocess -from cosalib import cmdlib -from cosalib.buildah import ( - buildah_base_args -) - -OSCONTAINER_COMMIT_LABEL = 'com.coreos.ostree-commit' - - -def run_get_json(args): - return json.loads(subprocess.check_output(args)) - - -def run_get_string(args): - return subprocess.check_output(args, encoding='UTF-8').strip() - - -def find_commit_from_oscontainer(repo): - """Given an ostree repo, find exactly one commit object in it""" - o = subprocess.check_output(['find', repo + '/objects', '-name', '*.commit'], encoding='UTF-8').strip().split('\n') - if len(o) > 1: - raise SystemExit(f"Multiple commit objects found in {repo}") - d, n = os.path.split(o[0]) - return os.path.basename(d) + n.split('.')[0] - - -# Given a container reference, pull the latest version, then extract the ostree -# repo a new directory dest/repo. -def oscontainer_extract(containers_storage, tmpdir, src, dest, - tls_verify=True, ref=None, cert_dir="", - authfile=""): - dest = os.path.realpath(dest) - subprocess.check_call(["ostree", "--repo=" + dest, "refs"]) - - # FIXME: Today we use skopeo in a hacky way for this. What we - # really want is the equivalent of `oc image extract` as part of - # podman or skopeo. - cmd = ['skopeo'] - # See similar message in oscontainer_build. 
- if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - if not tls_verify: - cmd.append('--tls-verify=false') - - if authfile != "": - cmd.append("--authfile={}".format(authfile)) - if cert_dir != "": - cmd.append("--cert-dir={}".format(cert_dir)) - tmp_tarball = tmpdir + '/container.tar' - cmd += ['copy', "docker://" + src, 'docker-archive://' + tmp_tarball] - cmdlib.runcmd(cmd) - cmdlib.runcmd(['tar', 'xf', tmp_tarball], cwd=tmpdir) - os.unlink(tmp_tarball) - # This is a brutal hack to extract all the layers; we don't even bother with ordering - # because we know we're not removing anything in higher layers. - subprocess.check_call(['find', '-name', '*.tar', '-exec', 'tar', 'xUf', '{}', ';'], cwd=tmpdir) - # Some files/directories aren't writable, and this will cause permission errors - subprocess.check_call(['find', '!', '-perm', '-u+w', '-exec', 'chmod', 'u+w', '{}', ';'], cwd=tmpdir) - - repo = tmpdir + '/srv/repo' - commit = find_commit_from_oscontainer(repo) - print(f"commit: {commit}") - cmdlib.runcmd(["ostree", "--repo=" + dest, "pull-local", repo, commit]) - if ref is not None: - cmdlib.runcmd([ - "ostree", "--repo=" + dest, "refs", '--create=' + ref, commit]) - - -# Given an OSTree repository at src (and exactly one ref) generate an -# oscontainer with it. 
-def oscontainer_build(containers_storage, tmpdir, src, ref, image_name_and_tag, - base_image, push=False, tls_verify=True, pushformat=None, - add_directories=[], cert_dir="", authfile="", digestfile=None, - display_name=None, labeled_pkgs=[]): - r = OSTree.Repo.new(Gio.File.new_for_path(src)) - r.open(None) - - [_, rev] = r.resolve_rev(ref, True) - if ref != rev: - print("Resolved {} = {}".format(ref, rev)) - [_, ostree_commit, _] = r.load_commit(rev) - ostree_commitmeta = ostree_commit.get_child_value(0) - versionv = ostree_commitmeta.lookup_value( - "version", GLib.VariantType.new("s")) - if versionv: - ostree_version = versionv.get_string() - else: - ostree_version = None - - buildah_base_argv = buildah_base_args(containers_storage) - - # In general, we just stick with the default tmpdir set up. But if a - # workdir is provided, then we want to be sure that all the heavy I/O work - # that happens stays in there since e.g. we might be inside a tiny supermin - # appliance. - if tmpdir is not None: - os.environ['TMPDIR'] = tmpdir - - bid = run_get_string(buildah_base_argv + ['from', base_image]) - mnt = run_get_string(buildah_base_argv + ['mount', bid]) - try: - dest_repo = os.path.join(mnt, 'srv/repo') - subprocess.check_call(['mkdir', '-p', dest_repo]) - subprocess.check_call([ - "ostree", "--repo=" + dest_repo, "init", "--mode=archive"]) - # Note that oscontainers don't have refs; we also disable fsync - # because the repo will be put into a container image and the build - # process should handle its own fsync (or choose not to). 
- print("Copying ostree commit into container: {} ...".format(rev)) - cmdlib.runcmd(["ostree", "--repo=" + dest_repo, "pull-local", "--disable-fsync", src, rev]) - - for d in add_directories: - with os.scandir(d) as it: - for entry in it: - dest = os.path.join(mnt, entry.name) - subprocess.check_call(['/usr/lib/coreos-assembler/cp-reflink', entry.path, dest]) - print(f"Copied in content from: {d}") - - # We use /noentry to trick `podman create` into not erroring out - # on a container with no cmd/entrypoint. It won't actually be run. - config = ['--entrypoint', '["/noentry"]', - '-l', OSCONTAINER_COMMIT_LABEL + '=' + rev] - if ostree_version is not None: - config += ['-l', 'version=' + ostree_version] - - base_pkgs = RpmOstree.db_query_all(r, rev, None) - for pkg in base_pkgs: - name = pkg.get_name() - if name in labeled_pkgs: - config += ['-l', f"com.coreos.rpm.{name}={pkg.get_evr()}.{pkg.get_arch()}"] - - # Generate pkglist.txt in to the oscontainer at / - pkg_list_dest = os.path.join(mnt, 'pkglist.txt') - # should already be sorted, but just re-sort to be sure - nevras = sorted([pkg.get_nevra() for pkg in base_pkgs]) - with open(pkg_list_dest, 'w') as f: - for nevra in nevras: - f.write(nevra) - f.write('\n') - - meta = {} - builddir = None - if os.path.isfile('builds/builds.json'): - with open('builds/builds.json') as fb: - builds = json.load(fb)['builds'] - latest_build = builds[0]['id'] - arch = cmdlib.get_basearch() - builddir = f"builds/{latest_build}/{arch}" - metapath = f"{builddir}/meta.json" - with open(metapath) as f: - meta = json.load(f) - rhcos_commit = meta['coreos-assembler.container-config-git']['commit'] - imagegit = meta.get('coreos-assembler.container-image-git') - if imagegit is not None: - cosa_commit = imagegit['commit'] - config += ['-l', f"com.coreos.coreos-assembler-commit={cosa_commit}"] - config += ['-l', f"com.coreos.redhat-coreos-commit={rhcos_commit}"] - - if 'extensions' in meta: - tarball = os.path.abspath(os.path.join(builddir, 
meta['extensions']['path'])) - dest_dir = os.path.join(mnt, 'extensions') - os.makedirs(dest_dir, exist_ok=True) - cmdlib.runcmd(["tar", "-xf", tarball], cwd=dest_dir) - - with open(os.path.join(dest_dir, 'extensions.json')) as f: - extensions = json.load(f) - - extensions_label = ';'.join([ext for (ext, obj) in extensions['extensions'].items() - if obj.get('kind', 'os-extension') == 'os-extension']) - config += ['-l', f"com.coreos.os-extensions={extensions_label}"] - - for pkgname in meta['extensions']['manifest']: - if pkgname in labeled_pkgs: - evra = meta['extensions']['manifest'][pkgname] - config += ['-l', f"com.coreos.rpm.{pkgname}={evra}"] - - if display_name is not None: - config += ['-l', 'io.openshift.build.version-display-names=machine-os=' + display_name, - '-l', 'io.openshift.build.versions=machine-os=' + ostree_version] - cmdlib.runcmd(buildah_base_argv + ['config'] + config + [bid]) - print("Committing container...") - iid = run_get_string(buildah_base_argv + ['commit', bid, image_name_and_tag]) - print("{} {}".format(image_name_and_tag, iid)) - finally: - subprocess.call(buildah_base_argv + ['umount', bid], stdout=subprocess.DEVNULL) - subprocess.call(buildah_base_argv + ['rm', bid], stdout=subprocess.DEVNULL) - - if push: - print("Pushing container") - podCmd = buildah_base_argv + ['push'] - if not tls_verify: - tls_arg = '--tls-verify=false' - else: - tls_arg = '--tls-verify' - podCmd.append(tls_arg) - - if authfile != "": - podCmd.append("--authfile={}".format(authfile)) - - if cert_dir != "": - podCmd.append("--cert-dir={}".format(cert_dir)) - - if digestfile is not None: - podCmd.append(f'--digestfile={digestfile}') - - if pushformat is not None: - podCmd.append(f'--format={pushformat}') - - podCmd.append(image_name_and_tag) - - cmdlib.runcmd(podCmd) - elif digestfile is not None: - inspect = run_get_json(buildah_base_argv + ['inspect', image_name_and_tag])[0] - with open(digestfile, 'w') as f: - f.write(inspect['Digest']) - - -def main(): - # 
Parse args and dispatch - parser = argparse.ArgumentParser() - parser.add_argument("--workdir", help="Temporary working directory") - parser.add_argument("--disable-tls-verify", - help="Disable TLS for pushes and pulls", - default=(True if os.environ.get("DISABLE_TLS_VERIFICATION", False) else False), - action="store_true") - parser.add_argument("--cert-dir", help="Extra certificate directories", - default=os.environ.get("OSCONTAINER_CERT_DIR", '')) - parser.add_argument("--authfile", help="Path to authentication file", - action="store", - default=os.environ.get("REGISTRY_AUTH_FILE", '')) - subparsers = parser.add_subparsers(dest='action') - parser_extract = subparsers.add_parser( - 'extract', help='Extract an oscontainer') - parser_extract.add_argument("src", help="Image reference") - parser_extract.add_argument("dest", help="Destination directory") - parser_extract.add_argument("--ref", help="Also set an ostree ref") - parser_build = subparsers.add_parser('build', help='Build an oscontainer') - parser_build.add_argument( - "--from", - help="Base image (default 'scratch')", - default='scratch') - parser_build.add_argument("src", help="OSTree repository") - parser_build.add_argument("rev", help="OSTree ref (or revision)") - parser_build.add_argument("name", help="Image name") - parser_build.add_argument("--display-name", help="Name used for an OpenShift component") - parser_build.add_argument("--add-directory", help="Copy in all content from referenced directory DIR", - metavar='DIR', action='append', default=[]) - parser_build.add_argument("--labeled-packages", help="Packages whose NEVRAs are included as labels on the image") - # For now we forcibly override to v2s2 https://bugzilla.redhat.com/show_bug.cgi?id=2058421 - parser_build.add_argument("--format", help="Pass through push format to buildah", default="v2s2") - parser_build.add_argument( - "--digestfile", - help="Write image digest to file", - action='store', - metavar='FILE') - parser_build.add_argument( - 
"--push", - help="Push to registry", - action='store_true') - args = parser.parse_args() - - labeled_pkgs = [] - if args.labeled_packages is not None: - labeled_pkgs = args.labeled_packages.split() - - containers_storage = None - tmpdir = None - if args.workdir is not None: - containers_storage = os.path.join(args.workdir, 'containers-storage') - if os.path.exists(containers_storage): - shutil.rmtree(containers_storage) - tmpdir = os.path.join(args.workdir, 'tmp') - if os.path.exists(tmpdir): - shutil.rmtree(tmpdir) - os.makedirs(tmpdir) - - try: - if args.action == 'extract': - oscontainer_extract( - containers_storage, tmpdir, args.src, args.dest, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - ref=args.ref, - authfile=args.authfile) - elif args.action == 'build': - oscontainer_build( - containers_storage, tmpdir, args.src, args.rev, args.name, - getattr(args, 'from'), - display_name=args.display_name, - digestfile=args.digestfile, - add_directories=args.add_directory, - push=args.push, - pushformat=args.format, - tls_verify=not args.disable_tls_verify, - cert_dir=args.cert_dir, - authfile=args.authfile, - labeled_pkgs=labeled_pkgs) - finally: - if containers_storage is not None and os.path.isdir(containers_storage): - shutil.rmtree(containers_storage) - - -if __name__ == '__main__': - main() diff --git a/src/oscontainer.py b/src/oscontainer.py new file mode 120000 index 00000000..06a195ec --- /dev/null +++ b/src/oscontainer.py @@ -0,0 +1 @@ +oscontainer-deprecated-legacy-format.py \ No newline at end of file diff --git a/src/runvm.sh b/src/runvm.sh new file mode 100755 index 00000000..2524f18a --- /dev/null +++ b/src/runvm.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -euo pipefail + +# This script just loads cmdlib.sh and executes runvm() with the given +# command line arguemnts to the script. It's used as a convenience +# wrapper for calling into runvm from other languages (i.e. python). + +dn=$(dirname "$0") +# shellcheck source=src/cmdlib.sh +. 
"${dn}"/cmdlib.sh + +runvm "$@" diff --git a/src/supermin-init-prelude.sh b/src/supermin-init-prelude.sh index 100e1e14..8cf8150d 100644 --- a/src/supermin-init-prelude.sh +++ b/src/supermin-init-prelude.sh @@ -5,6 +5,7 @@ mount -t proc /proc /proc mount -t sysfs /sys /sys +mount -t cgroup2 cgroup2 -o rw,nosuid,nodev,noexec,relatime,seclabel,nsdelegate,memory_recursiveprot /sys/fs/cgroup mount -t devtmpfs devtmpfs /dev # need /dev/shm for podman @@ -18,9 +19,11 @@ LANG=C /sbin/load_policy -i # need fuse module for rofiles-fuse/bwrap during post scripts run /sbin/modprobe fuse -# we want /dev/disk symlinks for nestos-installer +# we want /dev/disk symlinks for coreos-installer /usr/lib/systemd/systemd-udevd --daemon -/usr/sbin/udevadm trigger --settle +# We've seen this hang before, so add a timeout. This is best-effort anyway, so +# let's not fail on it. +timeout 30s /usr/sbin/udevadm trigger --settle || : # set up networking if [ -z "${RUNVM_NONET:-}" ]; then diff --git a/src/swtpm-wrapper b/src/swtpm-wrapper deleted file mode 100755 index 190be335..00000000 --- a/src/swtpm-wrapper +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -euo pipefail -# Clean up tmpdir when terminated -tpm_dir="$1" -shift -setpriv --pdeathsig SIGTERM swtpm socket \ - --terminate \ - --tpmstate dir="${tpm_dir}" --tpm2 \ - --ctrl type=unixio,path="${tpm_dir}/swtpm-sock" & -trap 'rm -rf ${tpm_dir}' EXIT -wait -- Gitee