diff --git a/docs/_config.yml b/docs/_config.yml
index 71125e517c1e3c81ae2c0c25bea2e45a2d20b64d..f845c67fdddac0a76ac2590cd321b53b24a6e261 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -1,9 +1,12 @@
-title: coreos/coreos-assembler
-description: coreos-assembler documentation
+# Template generated by https://github.com/coreos/repo-templates; do not edit downstream
+
+# To test documentation changes locally or using GitHub Pages, see:
+# https://github.com/coreos/fedora-coreos-tracker/blob/main/docs/testing-project-documentation-changes.md
+
+title: CoreOS Assembler
+description: CoreOS Assembler documentation
baseurl: "/coreos-assembler"
url: "https://coreos.github.io"
-# Comment above and use below for local development
-# url: "http://localhost:4000"
permalink: /:title/
markdown: kramdown
kramdown:
@@ -11,7 +14,7 @@ kramdown:
ndash: "--"
mdash: "---"
-remote_theme: coreos/just-the-docs
+remote_theme: just-the-docs/just-the-docs@v0.6.0
plugins:
- jekyll-remote-theme
@@ -19,7 +22,7 @@ color_scheme: coreos
# Aux links for the upper right navigation
aux_links:
- "coreos-assembler on GitHub":
+ "CoreOS Assembler on GitHub":
- "https://github.com/coreos/coreos-assembler"
footer_content: "Copyright © Red Hat, Inc. and others."
diff --git a/docs/building-fcos.md b/docs/building-fcos.md
index 774a876f07f2e6bb4fa54bd3b75f428540a6e438..63ae3533a10a4a6b652f37158abb9b903bdcda4a 100644
--- a/docs/building-fcos.md
+++ b/docs/building-fcos.md
@@ -37,7 +37,7 @@ There are various public cloud options that provide either bare metal or nested
virt, such as:
- [Packet](https://www.packet.com/)
-- [GCE nested virt](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances)
+- [GCP nested virt](https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances)
- EC2 `i3.metal` instances
- [IBM Bare Metal](https://www.ibm.com/cloud/bare-metal-servers)
- etc.
@@ -96,12 +96,13 @@ cosa() {
fi
fi
set -x
- podman run --rm -ti --security-opt label=disable --privileged \
- --uidmap=1000:0:1 --uidmap=0:1:1000 --uidmap 1001:1001:64536 \
- -v ${PWD}:/srv/ --device /dev/kvm --device /dev/fuse \
- --tmpfs /tmp -v /var/tmp:/var/tmp --name cosa \
- ${COREOS_ASSEMBLER_CONFIG_GIT:+-v $COREOS_ASSEMBLER_CONFIG_GIT:/srv/src/config/:ro} \
- ${COREOS_ASSEMBLER_GIT:+-v $COREOS_ASSEMBLER_GIT/src/:/usr/lib/coreos-assembler/:ro} \
+ podman run --rm -ti --security-opt=label=disable --privileged \
+ --uidmap=1000:0:1 --uidmap=0:1:1000 --uidmap=1001:1001:64536 \
+ -v=${PWD}:/srv/ --device=/dev/kvm --device=/dev/fuse \
+ --tmpfs=/tmp -v=/var/tmp:/var/tmp --name=cosa \
+ ${COREOS_ASSEMBLER_CONFIG_GIT:+-v=$COREOS_ASSEMBLER_CONFIG_GIT:/srv/src/config/:ro} \
+ ${COREOS_ASSEMBLER_GIT:+-v=$COREOS_ASSEMBLER_GIT/src/:/usr/lib/coreos-assembler/:ro} \
+ ${COREOS_ASSEMBLER_ADD_CERTS:+-v=/etc/pki/ca-trust:/etc/pki/ca-trust:ro} \
${COREOS_ASSEMBLER_CONTAINER_RUNTIME_ARGS} \
${COREOS_ASSEMBLER_CONTAINER:-$COREOS_ASSEMBLER_CONTAINER_LATEST} "$@"
rc=$?; set +x; return $rc
@@ -135,6 +136,11 @@ The environment variables are special purpose:
- `COREOS_ASSEMBLER_CONTAINER`: Allows for overriding the default assembler
container which is currently
`quay.io/coreos-assembler/coreos-assembler:latest`.
+- `COREOS_ASSEMBLER_ADD_CERTS`: Set this variable to mount in the CA bundle
+  from the host. This is necessary if cosa needs to fetch HTTPS resources
+  signed by an authority that the host trusts but that is outside the
+  container's default bundle. Alternatively, you can use `cosa shell` as
+  described below to get a persistent container in which the root CA only
+  needs to be set up once.
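+
+For example, a minimal sketch (any non-empty value enables the mount, since
+the wrapper above only checks whether the variable is set):
+
+```
+$ export COREOS_ASSEMBLER_ADD_CERTS=1
+$ cosa fetch
+```
+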
See the [Working on CoreOS Assembler](devel.md) page for examples of how
to use these variables.
diff --git a/docs/cosa.md b/docs/cosa.md
index 77bc3d41a251575c1b1c2c3c98862bcc8e298018..6b49864abb18a9f840f296e7ed0d9d6f94285ae8 100644
--- a/docs/cosa.md
+++ b/docs/cosa.md
@@ -68,7 +68,6 @@ Those less commonly used commands are listed here:
| [offline-update](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-offline-update) | Given a disk image and a coreos-assembler build, use supermin to update the disk image to the target OSTree commit "offline"
| [prune](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-prune) | This script removes previous builds. DO NOT USE on production pipelines
| [remote-prune](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-remote-prune) | Removes unreferenced builds from s3 bucket
-| [runc](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-runc) | Spawn the current build as a container
| [sign](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-sign) | Implements signing with RoboSignatory via fedora-messaging
| [supermin-shell](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-supermin-shell) | Get a supermin shell
| [tag](https://github.com/coreos/coreos-assembler/blob/main/src/cmd-tag) | Operate on the tags in `builds.json`
diff --git a/docs/cosa/run.md b/docs/cosa/run.md
index 5a01c1437e302a0fea5277f53afa2dc4547a8462..9f44f1b3d04abdbf2a8a8205c3d2d978575163ea 100644
--- a/docs/cosa/run.md
+++ b/docs/cosa/run.md
@@ -123,17 +123,17 @@ installed system automatically just as the live environment itself was set up.)
Of course, one can also use an Ignition config or [a customized ISO](https://coreos.github.io/coreos-installer/cmd/iso/#coreos-installer-iso-customize)
or the `coreos.inst.*` kargs using `--kargs` to also manually test automated
-flows. (Many of these flows are covered by our `kola testiso` scenarios.)
+flows. (Many of these flows are covered by our `kola testiso` tests.)
## Multipath
### As primary disk
To test multipath on the primary disk in a QEMU instance, use
-`--qemu-multipath`:
+`--qemu-multipath` and add the necessary kargs:
```
-$ cosa run --qemu-multipath
+$ cosa run --qemu-multipath --kargs 'rd.multipath=default root=/dev/disk/by-label/dm-mpath-root rw'
...
[core@cosa-devsh ~]$ findmnt /sysroot
TARGET SOURCE FSTYPE OPTIONS
@@ -191,4 +191,92 @@ TARGET SOURCE FSTYPE OPTIONS
(The `--ignition /run/ignition.json` is a trick for getting auto-login on the
installed system automatically just as the live environment itself was set up.)
-This is equivalent to our `kola testiso` multipath scenarios.
+This is equivalent to our `kola testiso` multipath tests.
+
+## Netbooting
+
+You can use the `--netboot` option to network boot via BOOTP (e.g. with iPXE, PXELINUX, or GRUB).
+
+### iPXE
+
+This is the simplest option since iPXE is the default firmware and doesn't
+require chaining. Just point to the iPXE script, e.g.:
+
+```
+$ cat tmp/ipxe/boot.ipxe
+#!ipxe
+kernel / initrd=main coreos.live.rootfs_url= ignition.firstboot ignition.platform.id=metal console=ttyS0 ignition.config.url=
+initrd --name main /
+boot
+$ cosa run -c --netboot tmp/ipxe/boot.ipxe
+```
+
+(That example requires hosting the rootfs separately, but you can also combine it with the initrd.)
+
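+One way to do that (a sketch, reusing the live artifact names from the
+PXELINUX example below; the combined file name is arbitrary) is to
+concatenate the two images and serve the result as a single initrd, in which
+case the `coreos.live.rootfs_url` karg is no longer needed:
+
+```
+$ cat fedora-coreos-38.20231010.dev.0-live-initramfs.x86_64.img \
+      fedora-coreos-38.20231010.dev.0-live-rootfs.x86_64.img > combined-initrd.img
+```
+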
+Or you can do an iSCSI boot:
+
+```
+#!ipxe
+sanboot iscsi:192.168.10.1::::iqn.2023-10.coreos.target.vm:coreos
+```
+
+See [this section](https://docs.fedoraproject.org/en-US/fedora-coreos/live-booting/#_booting_via_ipxe) of the official docs for more info.
+
+### PXELINUX
+
+Point to the `pxelinux.0` binary (likely a symlink), e.g.:
+
+```
+$ tree tmp/pxelinux/
+tmp/pxelinux/
+├── fedora-coreos-38.20231010.dev.0-live-initramfs.x86_64.img -> ../../builds/latest/x86_64/fedora-coreos-38.20231010.dev.0-live-initramfs.x86_64.img
+├── fedora-coreos-38.20231010.dev.0-live-kernel-x86_64 -> ../../builds/latest/x86_64/fedora-coreos-38.20231010.dev.0-live-kernel-x86_64
+├── fedora-coreos-38.20231010.dev.0-live-rootfs.x86_64.img -> ../../builds/latest/x86_64/fedora-coreos-38.20231010.dev.0-live-rootfs.x86_64.img
+├── ldlinux.c32 -> /usr/share/syslinux/ldlinux.c32
+├── pxelinux.0 -> /usr/share/syslinux/pxelinux.0
+└── pxelinux.cfg
+ └── default
+
+2 directories, 6 files
+$ cat tmp/pxelinux/pxelinux.cfg/default
+DEFAULT pxeboot
+TIMEOUT 20
+PROMPT 0
+LABEL pxeboot
+ KERNEL fedora-coreos-38.20231010.dev.0-live-kernel-x86_64
+ APPEND initrd=fedora-coreos-38.20231010.dev.0-live-initramfs.x86_64.img,fedora-coreos-38.20231010.dev.0-live-rootfs.x86_64.img ignition.firstboot ignition.platform.id=metal ignition.config.url= console=ttyS0
+IPAPPEND 2
+
+$ cosa run -c --netboot tmp/pxelinux/pxelinux.0 -m 4096
+```
+
+See [this section](https://docs.fedoraproject.org/en-US/fedora-coreos/live-booting/#_booting_via_pxe) of the official docs for more info.
+
+### GRUB
+
+Create the netboot directory if it doesn't already exist:
+
+```
+$ mkdir tmp/grub-netboot
+$ grub2-mknetdir --net-directory tmp/grub-netboot
+```
+
+Create your GRUB config, e.g.:
+
+```
+$ cat tmp/grub-netboot/boot/grub2/grub.cfg
+default=0
+timeout=1
+menuentry "CoreOS (BIOS/UEFI)" {
+ echo "Loading kernel"
+ linux /fedora-coreos-38.20231010.dev.0-live-kernel-x86_64 coreos.live.rootfs_url= ignition.firstboot ignition.platform.id=metal console=ttyS0 ignition.config.url=
+ echo "Loading initrd"
+ initrd fedora-coreos-38.20231010.dev.0-live-initramfs.x86_64.img
+}
+```
+
+Then point to that directory and to the `core.0` binary:
+
+```
+$ cosa run -c --netboot-dir tmp/grub-netboot --netboot boot/grub2/i386-pc/core.0 -m 4096
+```
diff --git a/docs/devel.md b/docs/devel.md
index b5b50760ed6c57cae55c269ee9eb9cb421340bd1..a7d2e3408f254f36be52511172592e3e521096de 100644
--- a/docs/devel.md
+++ b/docs/devel.md
@@ -37,6 +37,9 @@ $ make
$ sudo make install
```
+When developing locally, you usually don't need kolet built for other
+arches. You can skip them with e.g. `make KOLET_ARCHES=x86_64`.
+
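+For instance, on an x86_64 host:
+
+```
+$ make KOLET_ARCHES=x86_64
+$ sudo make install
+```
+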
From that point on, you only need to run `make && sudo make install` if you're
hacking on cosa itself (unless there are new RPM requirements added).
@@ -50,9 +53,8 @@ Similarly, if you are only working on kola, ore or plume, you can build, test
and use them directly with:
```
-$ cd mantle
-$ ./build kola ore plume
-$ ./test
+$ make kola ore plume
+$ make mantle-check
$ ./bin/kola ...
```
@@ -115,10 +117,12 @@ rebuild.
$ pip3 install --user -r test-requirements.txt
```
-2. Run `pytest` on the `tests` directory
+2. Run `make unittest`
```
-$ pytest tests/
+$ make unittest
+COSA_TEST_META_PATH=`pwd`/fixtures \
+ PYTHONPATH=`pwd`/src python3 -m pytest tests/
============================= test session starts ==============================
platform linux -- Python 3.7.3, pytest-4.6.3, py-1.8.0, pluggy-0.12.0
rootdir: /var/home/steve/Tech/GITHUB/coreos-assembler, inifile: pytest.ini
diff --git a/docs/gangplank/README.md b/docs/gangplank/README.md
deleted file mode 100644
index e71049b72b8d9d16d073408bcb151696b0ba23f0..0000000000000000000000000000000000000000
--- a/docs/gangplank/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Gangplank Documentation
-
-This directory contains the API documentation for Gangplank.
-
-## General Documentation:
-- [Why Gangplank](WHY.md)
-- [Usage](usage.md)
-
-## Developer Documentation
-- [ocp](api-ocp.md): Core Logic of Gangplank
-- [spec](api-spec.md): JobSpec parser
-- [cosa](api-cosa.md): Shared schema between CoreOS Assembler, Mantle and Gangplank
diff --git a/docs/gangplank/WHY.md b/docs/gangplank/WHY.md
deleted file mode 100644
index a66eb64de03165f986d05b45ae8521fa689ee9dc..0000000000000000000000000000000000000000
--- a/docs/gangplank/WHY.md
+++ /dev/null
@@ -1,152 +0,0 @@
-# WHY Gangplank?
-
-Arr matey....
-
-Introduced as [part of PR 1739](https://github.com/coreos/coreos-assembler/pull/1739), the GoLang Gangplank is a CI-specific alternative "entrypoint" for executing CoreOS Assembler (COSA).
-
-## Jenkins Pipelines are Greedy
-
-One of the lessons learned from the RHCOS pipelines is that they are expensive in terms of resources. Only a few steps (build and KOLA) actually require KVM access, and then most commands do not require 4Gb of RAM. However, the entire pipeline run from start to finish, needs to run with resource guarantees of the most resource intensive step. To whit:
-- Base builds need ~5Gb of disk space for cache, 4Gb of RAM and KVM access
-- KOLA testing requires 1-2Gb of RAM per thread
-- When building all the artifacts, 60Gb of ephemeral storage is needed before compression. On over-subscribed nodes, we have had to play games with PVC's in order to avoid node evictions for consuming too much disk space.
-- The OCP cluster for the RHCOS pipeline only has a few nodes that are capable of running the pipeline. Development, developer and production-delivery pipelines all compete for the same nodes. This has led to pipeline-launched Pods, being evicted during later stages.
-- Environmental differences between the COSA CI, FCOS and RHCOS pipelines has resulted in considerable drift.
-
-Running multiple Jenkins pods is one way to deal with this. Yet, each Jenkins launched pod requires both a COSA and Agent container. In the RHCOS case, we actually have to run three containers (COSA, Jenkins and the Message Bus container) -- adding an extra 512Mb to each pod, all scheduled on an over-subscribed resource. Nevermind the maintance cost of Jenkins.
-
-The other problem with Jenkins is that you _need_:
-- The COSA Image name. The RHCOS pipeline has to know what version of COSA to use since there is a different version for each RHCOS release.
-- An Agent and Master image for Jenkins itself.
-- A pantheon of Cloud publication secrets that either are stored in Jenkins or in OCP itself. When the RHCOS pipeline started using OpenShift secrets, we ended up mounting roughly a dozen `volumeMounts` to each pod.
-- The agents often timeout, and perform poorly over high latency connections.
-
-While it's possible to run cross-cluster with Jenkins, in reality, it almost is never done. In fact, for the RHCOS pipelines, we have three teams running various versions of Jenkins and pipeline editions. The nicities of Jenkins in this world, are in fact, liabilities. A common theme for the production delivery team is various Jenkins issues. Worse, for each architecture of RHCOS we have have to stand up Jenkins and then populate an OCP namespace.
-
-## Jenkins has become a templating engine
-
-In the RHCOS case, a considerable amount of Groovy has been written to parse, check and emit "cosa " commands. The FCOS pipeline is easier to maintain and understand; the RHCOS pipeline is a special snowflake of variable-controlled rules. The complexity of RHCOS's pipeline comes from the business rules requiring special logic. FCOS's pipeline only has to support a single configuration, while RHCOS has to support at least three releases on four architectures.
-
-Initially the RCHOS pipeline uses OpenShift BuildConfig with envVars. Over time, almost all of these envVars and even Jenkins job parameters were removed. As it turns out, converting YAML to envVars to Groovy to Shell is ripe for type errors; this was especially true when dealing with truthiness.
-
-To help deal with truthiness, the RHCOS pipeline grew the "JobSpec" (as in Jenkins Job). The JobSpec was an attempt at creating a declarative method of setting variables for the pipeline to consume. This idea allows the RHCOS pipeline to run entirely via GitOps and without having to deal with type-conversion errors, or having envVars from a BuildConfig, to provide for dynamic behavior.
-
-## The Cost of Developing Jenkins
-
-The problem with using Jenkins as a templating engine is that it is incredibly inefficient for testing. Consider:
-1. changes have to be commited first
-1. then a build has to be started
-1. a human has to check the run
-1. repeat, repeat....
-
-The problem with this model is:
-- it requires developers to invest in Jenkins and, by extension Groovy
-- it's wasteful in terms of resources and re-enforces git-smash'ng
-- there is no way to have a pure logic-check run -- the commands have to actually be run
-
-Some work has been done to CI the CI, which introduced its own class of problems.
-
-## The Problem
-
-While the JobSpec provided a means of GitOps controlled execution of the Jenkins Job, *it was simply wall-papering over a glaring missing feature in COSA: the lack of a non-CLI interface into COSA*. A review of the three COSA CI pipelines shows that Jenkins provides:
-- launching pods
-- templating COSA commands
-- a pretty web-UI
-
-In fact, the problem is a bit deeper:
-- COSA assumes an order of operations that is codified in the code, but not documented. The Jenkins pipelines are quasi-authoritative in the order of operations. To whit: `cosa build` must preceed a `cosa buildextend`, some artifacts require the `metal*` artifacts while others require just the `qcow2`.
-- The CLI interface is inconsistent. Some commands are Bash, others Python and use different styled arguments.
-- The notion of `build` is based on positional perception: COSA considers building the OSTree a build, but by default it builds the `qcow2`. Pipelines consider creating `artifacts` as "building". And users consider a "build" to be _all_ the produced artifacts.
-- When COSA changes, all the pipelines have to change.
-
-## The Solution
-
-Gangplank is proposed as the "thing" to provide stable interface(s) into COSA. [Previously an envVar interface](https://github.com/coreos/enhancements/pull/1) was proposed. Bluntly speaking, the idea was not well-received. Gangplank seeks to provide a set of interfaces into COSA that:
-- provide a means of file-based instructions to COSA
-- provide a means of templating the COSA commands
-- initially provide the RHCOS JobSpec to templated COSA commands
-- act as a CI `ENTRYPOINT` for COSA containers built to run in OCP
-- run COSA as a first-class OpenShift Custom Builder
-- provide testable code for parsing the commands
-- avoid migrating Jenkins pipeline to Tekton
-
-While `gangplank` currently supports the RHCOS JobSpec, it is anticipated that other "specifications" will be introduced such as OCP's BuildConfig Specification.
-
-## GoLang to the rescue
-
-The bulk of COSA code is either Bash or Python. [It has been proposed that we support commands in GoLang](https://github.com/coreos/coreos-assembler/issues/1668), previously. And since COSA swallowed Mantle, there is a third language: GoLang.
-
-GoLang was chosen over Bash or Python for several reasons:
-- GoLang is a compiled language. For something acting as an orchestrator, run-time compilation or scripts are too fragile.
-- GoLang is strictly typed. Going from strictly typed to the loosely typed Bash or Python is "safer".
-- The contributing developers of COSA prefer Bash or GoLang over Python.
-- GoLang templating is commonly used in the OpenShift program.
-- Bash is largely untestable.
-- GoLang avoids previous COSA disputes regarding OOO and style.
-
-## Why not OpenShift Templates?
-
-An early lesson learned writing the RHCOS pipeline is that while an OpenShift template is trivial, they tend to pollute the namespace. OpenShift templates are great for deploying an application, but become tedious when deploying arbitrary configurations. For example, using an OpenShift template to deploy test, dev, and production configurations could require three separate deployments when all that changes is a single variable.
-
-The vision of the `gangplank` is to create a templated execution of COSA based on the file interface. That is, instead of managing different deployments, COSA will take a configuration (the JobSpec) and `run-steps`. A single `buildconfig` can service the needs of developers and production environments.
-
-## Jenkins as a helper
-
-Jenkins is NOT going away in this world view. Rather, Jenkins will not be directly scheduling the pods. A new set of COSA CI Libs will be created that provide wrappers around the `oc` binary for calling OpenShift BuildConfig.
-
-An example invocation might look like:
-```
- stage("build") {
- parallel x86: {
- cosa_oc("creds-x86", "build.steps")
- } aarch64: {
- cosa_oc("creds-aarch64", "build.steps")
- }
-```
-
-Where `cosa_oc` is the wrapper that:
-- sets `KUBECONFIG=creds-*`
-- creates a `build.tar` containing the `JobSpec`, `build.steps`
-- calls `oc start-build bc/cosa-priv --from-archive=build.tar --follow=true`
-
-In this world, the Secrets would exist outside of Jenkins and would be stored in the OpenShift environment and referenced in the `buildConfig` itself. Gangplank supports the OpenShift BuildAPI and the Kubernetes APIs.
-- unpack `build.tar`
-- find the `jobspec` and the `build.steps`
-- execute the steps
-
-Since the builds are using `buildConfigs`, each "build" is repeatable.
-
-Ideally, there would be BuildConfigs for:
-- privileged execution for builds that need direct /dev/kvm access
-- privileged execution for testing
-- unprivileged execution publication steps
-
-## Development and Pipeline Parity
-
-A profound pain point for COSA _and_ pipeline development is that environmental differences between the developer (and, by extension their pet container), and COSA, FCOS and RHCOS pipelines can cause a rousing round of "fix a bug whack-a-mole" (where the code works in one pipeline, but not another). `entrypoint` seeks to solve that by removing Jenkins from the Pod execution by allowing the developer to run pipeline code locally. That is, a developer should have reasonable assurances that if they locally run steps via `podman -it --entrypoint /usr/bin/gangplank coreos-assembler....` it will succeed in one of the pipelines.
-
-## `cosa remote`
-
-In the "Jenkins as a Helper" section, a curious opening appears -- the ability to run `cosa` commands _remotely_ in an OpenShift Cluster.
-
-For those unlucky enough to obtain their internet access from a major US-based cable monopoly, an incredible pain point is the "build-upload" cycle:
-1. developer begs around for $CLOUD credentials
-1. they hack on COSA, RPM's, overrides, etc.
-1. build
-1. upload
-1. do something else while 20G image is slurped up at 250Kbs...
-1. repeats steps 2-5
-
-By having COSA as a `buildConfig`, we can now have a `cosa remote` command that:
-- creates a `devel.tar` of `src`, `overrides`, and local COSA hacks with a JobSpec and `build.steps`
-- call `oc start-build bc/cosa-priv --from-archive=devel.tar --env=DEVELOPER_MODE=1 --follow=true`
-
-When the buildConfig starts, it would upack `devel.tar` and then exec into the developer's local COSA environment running remotely. This would save the developer from:
-1. having to get their own credentials
-1. the build happens close to the source
-1. when pushing the build, the developer's in-house broadband is not used
-1. development time can be significantly reduced.
-
-## In Conclusion
-
-The rationale behind draining the pipelines into Jenkins is a question of developer efficiency, satisfaction, and reducing the operational burden.
diff --git a/docs/gangplank/api-cosa.md b/docs/gangplank/api-cosa.md
deleted file mode 100755
index 1d1606b271dcea3e738344e6186b273e9efad1f5..0000000000000000000000000000000000000000
--- a/docs/gangplank/api-cosa.md
+++ /dev/null
@@ -1,1495 +0,0 @@
-
-
-# cosa
-
-```go
-import "github.com/coreos/gangplank/cosa"
-```
-
-## Index
-
-- [Constants](<#constants>)
-- [Variables](<#variables>)
-- [func BuilderArch() string](<#func-builderarch>)
-- [func CanArtifact(artifact string) bool](<#func-canartifact>)
-- [func GetCommandBuildableArtifacts() []string](<#func-getcommandbuildableartifacts>)
-- [func IsMetaJSON(path string) bool](<#func-ismetajson>)
-- [func Open(p string) (io.ReadCloser, error)](<#func-open>)
-- [func SetArch(a string)](<#func-setarch>)
-- [func SetIOBackendFile()](<#func-setiobackendfile>)
-- [func SetIOBackendMinio(ctx context.Context, m *minio.Client) error](<#func-setiobackendminio>)
-- [func SetSchemaFromFile(r io.Reader) error](<#func-setschemafromfile>)
-- [func defaultWalkFunc(p string) <-chan fileInfo](<#func-defaultwalkfunc>)
-- [func init()](<#func-init>)
-- [type AdvisoryDiff](<#type-advisorydiff>)
-- [type AdvisoryDiffItems](<#type-advisorydiffitems>)
-- [type AliyunImage](<#type-aliyunimage>)
-- [type Amis](<#type-amis>)
-- [type Artifact](<#type-artifact>)
-- [type Build](<#type-build>)
- - [func ParseBuild(path string) (*Build, error)](<#func-parsebuild>)
- - [func ReadBuild(dir, buildID, arch string) (*Build, string, error)](<#func-readbuild>)
- - [func buildParser(r io.Reader) (*Build, error)](<#func-buildparser>)
- - [func (build *Build) GetArtifact(artifact string) (*Artifact, error)](<#func-build-getartifact>)
- - [func (build *Build) IsArtifact(path string) (string, bool)](<#func-build-isartifact>)
- - [func (build *Build) Validate() []error](<#func-build-validate>)
- - [func (build *Build) WriteMeta(path string, validate bool) error](<#func-build-writemeta>)
- - [func (build *Build) artifacts() map[string]*Artifact](<#func-build-artifacts>)
- - [func (b *Build) mergeMeta(r io.Reader) error](<#func-build-mergemeta>)
-- [type BuildArtifacts](<#type-buildartifacts>)
-- [type Cloudartifact](<#type-cloudartifact>)
-- [type Extensions](<#type-extensions>)
- - [func (e *Extensions) toArtifact() *Artifact](<#func-extensions-toartifact>)
-- [type Gcp](<#type-gcp>)
-- [type Git](<#type-git>)
-- [type Image](<#type-image>)
-- [type Koji](<#type-koji>)
-- [type PackageSetDifferences](<#type-packagesetdifferences>)
-- [type PackageSetDifferencesItems](<#type-packagesetdifferencesitems>)
-- [type build](<#type-build>)
-- [type buildsJSON](<#type-buildsjson>)
- - [func getBuilds(dir string) (*buildsJSON, error)](<#func-getbuilds>)
- - [func (b *buildsJSON) getLatest(arch string) (string, bool)](<#func-buildsjson-getlatest>)
-- [type fileInfo](<#type-fileinfo>)
-- [type fileMode](<#type-filemode>)
-- [type ioBackendFile](<#type-iobackendfile>)
- - [func (i *ioBackendFile) Name() string](<#func-iobackendfile-name>)
- - [func (i *ioBackendFile) Open(p string) (io.ReadCloser, error)](<#func-iobackendfile-open>)
-- [type ioBackendMinio](<#type-iobackendminio>)
- - [func (im *ioBackendMinio) Open(p string) (io.ReadCloser, error)](<#func-iobackendminio-open>)
-- [type ioBackender](<#type-iobackender>)
- - [func newBackend() ioBackender](<#func-newbackend>)
-- [type objectInfo](<#type-objectinfo>)
- - [func (ao *objectInfo) IsDir() bool](<#func-objectinfo-isdir>)
- - [func (ao *objectInfo) ModTime() time.Time](<#func-objectinfo-modtime>)
- - [func (ao *objectInfo) Mode() fileMode](<#func-objectinfo-mode>)
- - [func (ao *objectInfo) Name() string](<#func-objectinfo-name>)
- - [func (ao *objectInfo) Size() int64](<#func-objectinfo-size>)
- - [func (ao *objectInfo) Sys() interface{}](<#func-objectinfo-sys>)
-- [type walkerFn](<#type-walkerfn>)
- - [func createMinioWalkFunc(m *minio.Client) walkerFn](<#func-createminiowalkfunc>)
-
-
-## Constants
-
-```go
-const (
- // CosaBuildsJSON is the COSA build.json file name
- CosaBuildsJSON = "builds.json"
-)
-```
-
-```go
-const (
- // CosaMetaJSON is the meta.json file
- CosaMetaJSON = "meta.json"
-)
-```
-
-## Variables
-
-```go
-var (
- // ErrMetaFailsValidation is thrown on reading and invalid meta.json
- ErrMetaFailsValidation = errors.New("meta.json failed schema validation")
-
- // ErrMetaNotFound is thrown when a meta.json cannot be found
- ErrMetaNotFound = errors.New("meta.json was not found")
-
- // reMetaJSON matches meta.json files use for merging
- reMetaJSON = regexp.MustCompile(`^meta\.(json|.*\.json)$`)
-
- // forceArch when not empty will override the build arch
- forceArch, _ = os.LookupEnv("COSA_FORCE_ARCH")
-)
-```
-
-```go
-var (
- // ErrNoBuildsFound is thrown when a build is missing
- ErrNoBuildsFound = errors.New("no COSA builds found")
-)
-```
-
-```go
-var ErrNoMinioClient = errors.New("minio client is not defined")
-```
-
-```go
-var (
- // SchemaJSON Schema document. Default the generated Schema.
- SchemaJSON = generatedSchemaJSON
-)
-```
-
-```go
-var generatedSchemaJSON = `{
- "definitions": {
- "artifact": {
- "type": "object",
- "properties": {
- "path": {
- "$id": "#/artifact/Path",
- "type":"string",
- "title":"Path"
- },
- "sha256": {
- "$id": "#/artifact/sha256",
- "type":"string",
- "title":"SHA256"
- },
- "size": {
- "$id": "#/artifact/size",
- "type":"number",
- "title":"Size in bytes"
- },
- "uncompressed-sha256": {
- "$id": "#/artifact/uncompressed-sha256",
- "type":"string",
- "title":"Uncompressed SHA256"
- },
- "uncompressed-size": {
- "$id": "#/artifact/uncompressed-size",
- "type":"integer",
- "title":"Uncompressed-size"
- }
- },
- "optional": [
- "size",
- "uncompressed-sha256",
- "uncompressed-size"
- ],
- "required": [
- "path",
- "sha256"
- ]
- },
- "image": {
- "type": "object",
- "required": [
- "digest",
- "image"
- ],
- "optional": [
- "comment"
- ],
- "properties": {
- "digest": {
- "$id": "#/image/digest",
- "type":"string",
- "title":"Digest"
- },
- "comment": {
- "$id": "#/image/comment",
- "type":"string",
- "title":"Comment"
- },
- "image": {
- "$id": "#/image/image",
- "type":"string",
- "title":"Image"
- }
- }
- },
- "cloudartifact": {
- "type": "object",
- "required": [
- "image",
- "url"
- ],
- "properties": {
- "image": {
- "$id":"#/cloudartifact/image",
- "type":"string",
- "title":"Image"
- },
- "url": {
- "$id":"#/cloudartifact/url",
- "type":"string",
- "title":"URL"
- }
- }
- },
- "git": {
- "type": "object",
- "required": [
- "commit",
- "origin"
- ],
- "optional": [
- "branch",
- "dirty"
- ],
- "properties": {
- "branch": {
- "$id":"#/git/branch",
- "type":"string",
- "title":"branch",
- "default":"",
- "examples": [
- "HEAD"
- ],
- "minLength": 3
- },
- "commit": {
- "$id":"#/git/commit",
- "type":"string",
- "title":"commit",
- "default":"",
- "examples": [
- "742edc307e58f35824d906958b6493510e12b593"
- ],
- "minLength": 5
- },
- "dirty": {
- "$id":"#/git/dirty",
- "type":"string",
- "title":"dirty",
- "default":"",
- "examples": [
- "true"
- ],
- "minLength": 1
- },
- "origin": {
- "$id":"#/git/origin",
- "type":"string",
- "title":"origin",
- "default":"",
- "examples": [
- "https://github.com/coreos/fedora-coreos-config"
- ],
- "minLength": 1
- }
- }
- },
- "pkg-items": {
- "type":"array",
- "title":"Package Set differences",
- "items": {
- "$id":"#/pkgdiff/items/item",
- "title":"Items",
- "default":"",
- "minLength": 1
- }
- },
- "advisory-items": {
- "type":"array",
- "title":"Advisory diff",
- "items": {
- "$id":"#/advisory-diff/items/item",
- "title":"Items",
- "default":""
- }
- }
- },
- "$schema":"http://json-schema.org/draft-07/schema#",
- "$id":"http://github.com/coreos/coreos-assembler/blob/main/schema/v1.json",
- "type":"object",
- "title":"CoreOS Assember v1 meta.json schema",
- "required": [
- "buildid",
- "name",
- "ostree-commit",
- "ostree-content-checksum",
- "ostree-timestamp",
- "ostree-version",
- "rpm-ostree-inputhash",
- "summary"
- ],
- "optional": [
- "aliyun",
- "amis",
- "azure",
- "azurestack",
- "build-url",
- "digitalocean",
- "exoscale",
- "gcp",
- "ibmcloud",
- "images",
- "oscontainer",
- "extensions",
- "parent-pkgdiff",
- "pkgdiff",
- "parent-advisories-diff",
- "advisories-diff",
- "release-payload",
-
- "coreos-assembler.basearch",
- "coreos-assembler.build-timestamp",
- "coreos-assembler.code-source",
- "coreos-assembler.config-dirty",
- "coreos-assembler.config-gitrev",
- "coreos-assembler.container-config-git",
- "coreos-assembler.container-image-git",
- "coreos-assembler.delayed-meta-merge",
- "coreos-assembler.image-config-checksum",
- "coreos-assembler.image-genver",
- "coreos-assembler.image-input-checksum",
- "coreos-assembler.meta-stamp",
- "coreos-assembler.overrides-active",
- "fedora-coreos.parent-commit",
- "fedora-coreos.parent-version",
- "ref"
- ],
- "additionalProperties":false,
- "properties": {
- "ref": {
- "$id":"#/properties/ref",
- "type":"string",
- "title":"BuildRef",
- "default":"",
- "minLength": 1
- },
- "build-url": {
- "$id":"#/properties/build-url",
- "type":"string",
- "title":"Build URL",
- "default":"",
- "minLength": 1
- },
- "buildid": {
- "$id":"#/properties/buildid",
- "type":"string",
- "title":"BuildID",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.basearch": {
- "$id":"#/properties/coreos-assembler.basearch",
- "type":"string",
- "title":"Architecture",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.build-timestamp": {
- "$id":"#/properties/coreos-assembler.build-timestamp",
- "type":"string",
- "title":"Build Time Stamp",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.code-source": {
- "$id":"#/properties/coreos-assembler.code-source",
- "type":"string",
- "title":"CoreOS Source",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.config-dirty": {
- "$id":"#/properties/coreos-assembler.config-dirty",
- "type":"string",
- "title":"GitDirty",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.config-gitrev": {
- "$id":"#/properties/coreos-assembler.config-gitrev",
- "type":"string",
- "title":"Config GitRev",
- "default":"",
- "minLength": 1
- },
- "coreos-assembler.container-config-git": {
- "$id":"#/properties/coreos-assembler.container-config-git",
- "type":"object",
- "title":"Container Config GIT",
- "$ref": "#/definitions/git"
- },
- "coreos-assembler.container-image-git": {
- "$id":"#/properties/coreos-assembler.container-image-git",
- "type":"object",
- "title":"COSA Container Image Git",
- "$ref": "#/definitions/git"
- },
- "coreos-assembler.delayed-meta-merge": {
- "$id":"#/properties/coreos-assembler.delayed-meta-merge",
- "type":"boolean",
- "title":"COSA Delayed Meta Merge",
- "default": "False"
- },
- "coreos-assembler.meta-stamp": {
- "$id":"#/properties/coreos-assembler.meta-stamp",
- "type":"number",
- "title":"Meta Stamp",
- "default":"",
- "minLength": 16
- },
- "fedora-coreos.parent-version": {
- "$id":"#/properties/fedora-coreos.parent-version",
- "type":"string",
- "title":"Fedora CoreOS Parent Version",
- "default":"",
- "minLength": 12
- },
- "fedora-coreos.parent-commit": {
- "$id":"#/properties/fedora-coreos.parent-commit",
- "type":"string",
- "title":"Fedora CoreOS parent commit",
- "default":"",
- "examples": [
- "f15f5b25cf138a7683e3d200c53ece2091bf71d31332135da87892ab72ff4ee3"
- ],
- "minLength": 64
- },
- "coreos-assembler.image-config-checksum": {
- "$id":"#/properties/coreos-assembler.image-config-checksum",
- "type":"string",
- "title":"COSA image checksum",
- "default":"",
- "minLength": 64
- },
- "coreos-assembler.image-genver": {
- "$id":"#/properties/coreos-assembler.image-genver",
- "type":"integer",
- "title":"COSA Image Version",
- "default": 0,
- "examples": [
- 0
- ]
- },
- "coreos-assembler.image-input-checksum": {
- "$id":"#/properties/coreos-assembler.image-input-checksum",
- "type":"string",
- "title":"Image input checksum",
- "default":"",
- "minLength": 64
- },
- "coreos-assembler.overrides-active": {
- "$id":"#/properties/coreos-assembler.overrides-active",
- "title":"Overrides Active",
- "default":"",
- "type": "boolean"
- },
- "images": {
- "$id":"#/properties/images",
- "type":"object",
- "title":"Build Artifacts",
- "required": [
- "ostree"
- ],
- "optional": [
- "aliyun",
- "aws",
- "azure",
- "azurestack",
- "dasd",
- "digitalocean",
- "exoscale",
- "gcp",
- "ibmcloud",
- "initramfs",
- "iso",
- "kernel",
- "live-kernel",
- "live-initramfs",
- "live-iso",
- "live-rootfs",
- "metal",
- "metal4k",
- "openstack",
- "qemu",
- "vmware",
- "vultr"
- ],
- "properties": {
- "ostree": {
- "$id":"#/properties/images/properties/ostree",
- "type":"object",
- "title":"OSTree",
- "$ref": "#/definitions/artifact"
- },
- "dasd": {
- "$id":"#/properties/images/properties/dasd",
- "type":"object",
- "title":"dasd",
- "$ref": "#/definitions/artifact"
- },
- "exoscale": {
- "$id":"#/properties/images/properties/exoscale",
- "type":"object",
- "title":"exoscale",
- "$ref": "#/definitions/artifact"
- },
- "qemu": {
- "$id":"#/properties/images/properties/qemu",
- "type":"object",
- "title":"Qemu",
- "$ref": "#/definitions/artifact"
- },
- "metal": {
- "$id":"#/properties/images/properties/metal",
- "type":"object",
- "title":"Metal",
- "$ref": "#/definitions/artifact"
- },
- "metal4k": {
- "$id":"#/properties/images/properties/metal4k",
- "type":"object",
- "title":"Metal (4K native)",
- "$ref": "#/definitions/artifact"
- },
- "iso": {
- "$id":"#/properties/images/properties/iso",
- "type":"object",
- "title":"ISO",
- "$ref": "#/definitions/artifact"
- },
- "kernel": {
- "$id":"#/properties/images/properties/kernel",
- "type":"object",
- "title":"Kernel",
- "$ref": "#/definitions/artifact"
- },
- "initramfs": {
- "$id":"#/properties/images/properties/initramfs",
- "type":"object",
- "title":"Initramfs",
- "$ref": "#/definitions/artifact"
- },
- "live-kernel": {
- "$id":"#/properties/images/properties/live-kernel",
- "type":"object",
- "title":"Live Kernel",
- "$ref": "#/definitions/artifact"
- },
- "live-initramfs": {
- "$id":"#/properties/images/properties/live-initramfs",
- "type":"object",
- "title":"Live Initramfs",
- "$ref": "#/definitions/artifact"
- },
- "live-iso": {
- "$id":"#/properties/images/properties/live-iso",
- "type":"object",
- "title":"Live ISO",
- "$ref": "#/definitions/artifact"
- },
- "live-rootfs": {
- "$id":"#/properties/images/properties/live-rootfs",
- "type":"object",
- "title":"Live Rootfs",
- "$ref": "#/definitions/artifact"
- },
- "openstack": {
- "$id":"#/properties/images/properties/openstack",
- "type":"object",
- "title":"OpenStack",
- "$ref": "#/definitions/artifact"
- },
- "vmware": {
- "$id":"#/properties/images/properties/vmware",
- "type":"object",
- "title":"VMWare",
- "$ref": "#/definitions/artifact"
- },
- "vultr": {
- "$id": "#/properties/images/properties/vultr",
- "type": "object",
- "title": "Vultr",
- "$ref": "#/definitions/artifact"
- },
- "aliyun": {
- "$id":"#/properties/images/properties/aliyun",
- "type":"object",
- "title":"Aliyun",
- "$ref": "#/definitions/artifact"
- },
- "aws": {
- "$id":"#/properties/images/properties/aws",
- "type":"object",
- "title":"AWS",
- "$ref": "#/definitions/artifact"
- },
- "azure": {
- "$id":"#/properties/images/properties/azure",
- "type":"object",
- "title":"Azure",
- "$ref": "#/definitions/artifact"
- },
- "azurestack": {
- "$id":"#/properties/images/properties/azurestack",
- "type":"object",
- "title":"AzureStack",
- "$ref": "#/definitions/artifact"
- },
- "digitalocean": {
- "$id":"#/properties/images/properties/digitalocean",
- "type":"object",
- "title":"DigitalOcean",
- "$ref": "#/definitions/artifact"
- },
- "ibmcloud": {
- "$id":"#/properties/images/properties/ibmcloud",
- "type":"object",
- "title":"IBM Cloud",
- "$ref": "#/definitions/artifact"
- },
- "gcp": {
- "$id":"#/properties/images/properties/gcp",
- "type":"object",
- "title":"GCP",
- "$ref": "#/definitions/artifact"
- }
- }
- },
- "name": {
- "$id":"#/properties/name",
- "type":"string",
- "title":"Name",
- "default":"fedora-coreos",
- "examples": [
- "rhcos",
- "fedora-coreos"
- ]
- },
- "oscontainer": {
- "$id":"#/properties/oscontainer",
- "type":"object",
- "title":"Oscontainer",
- "$ref": "#/definitions/image"
- },
- "extensions": {
- "$id":"#/properties/extensions",
- "type":"object",
- "title":"Extensions",
- "required": [
- "path",
- "sha256",
- "rpm-ostree-state",
- "manifest"
- ],
- "properties": {
- "path": {
- "$id": "#/artifact/Path",
- "type":"string",
- "title":"Path"
- },
- "sha256": {
- "$id": "#/artifact/sha256",
- "type":"string",
- "title":"SHA256"
- },
- "rpm-ostree-state": {
- "$id":"#/properties/extensions/items/properties/rpm-ostree-state",
- "type":"string",
- "title":"RpmOstreeState",
- "default":"",
- "minLength": 64
- },
- "manifest": {
- "$id":"#/properties/extensions/items/properties/manifest",
- "type":"object",
- "title":"Manifest"
- }
- }
- },
- "ostree-commit": {
- "$id":"#/properties/ostree-commit",
- "type":"string",
- "title":"ostree-commit",
- "default":"",
- "minLength": 64
- },
- "ostree-content-bytes-written": {
- "$id":"#/properties/ostree-content-bytes-written",
- "type":"integer",
- "title":"ostree-content-bytes-written",
- "default": 0
- },
- "ostree-content-checksum": {
- "$id":"#/properties/ostree-content-checksum",
- "type":"string",
- "title":"ostree-content-checksum",
- "default":"",
- "minLength": 64
- },
- "ostree-n-cache-hits": {
- "$id":"#/properties/ostree-n-cache-hits",
- "type":"integer",
- "title":"ostree-n-cache-hits",
- "default": 0
- },
- "ostree-n-content-total": {
- "$id":"#/properties/ostree-n-content-total",
- "type":"integer",
- "title":"ostree-n-content-total",
- "default": 0
- },
- "ostree-n-content-written": {
- "$id":"#/properties/ostree-n-content-written",
- "type":"integer",
- "title":"ostree-n-content-written",
- "default": 0
- },
- "ostree-n-metadata-total": {
- "$id":"#/properties/ostree-n-metadata-total",
- "type":"integer",
- "title":"ostree-n-metadata-total",
- "default": 0
- },
- "ostree-n-metadata-written": {
- "$id":"#/properties/ostree-n-metadata-written",
- "type":"integer",
- "title":"ostree-n-metadata-written",
- "default": 0
- },
- "ostree-timestamp": {
- "$id":"#/properties/ostree-timestamp",
- "type":"string",
- "title":"ostree timestamp",
- "default":"",
- "examples": [
- "2020-01-15T19:31:31Z"
- ],
- "pattern":"\\d{4}-\\d{2}-\\d{2}T.*Z$"
- },
- "ostree-version": {
- "$id":"#/properties/ostree-version",
- "type":"string",
- "title":"ostree version",
- "default":"",
- "minLength": 1
- },
- "pkgdiff": {
- "$id":"#/properties/pkgdiff",
- "type":"array",
- "title":"pkgdiff between builds",
- "$ref": "#/definitions/pkg-items"
- },
- "parent-pkgdiff": {
- "$id":"#/properties/parent-pkgdiff",
- "type":"array",
- "title":"pkgdiff against parent",
- "$ref": "#/definitions/pkg-items"
- },
- "advisories-diff": {
- "$id":"#/properties/advisories-diff",
- "type":"array",
- "title":"advisory diff between builds",
- "$ref": "#/definitions/advisory-items"
- },
- "parent-advisories-diff": {
- "$id":"#/properties/parent-advisory-diff",
- "type":"array",
- "title":"advisory diff against parent",
- "$ref": "#/definitions/advisory-items"
- },
- "rpm-ostree-inputhash": {
- "$id":"#/properties/rpm-ostree-inputhash",
- "type":"string",
- "title":"input has of the rpm-ostree",
- "default":"",
- "minLength": 64
- },
- "summary": {
- "$id":"#/properties/summary",
- "type":"string",
- "title":"Build Summary",
- "default":"",
- "minLength": 1
- },
- "aliyun": {
- "$id":"#/properties/aliyun",
- "type":"array",
- "title":"Alibaba/Aliyun Uploads",
- "items": {
- "$id":"#/properties/aliyun/images",
- "type":"object",
- "title":"Aliyun Image",
- "required": [
- "name",
- "id"
- ],
- "properties": {
- "name": {
- "$id":"#/properties/aliyun/items/properties/name",
- "type":"string",
- "title":"Region",
- "default":"",
- "minLength": 1
- },
- "id": {
- "$id":"#/properties/aliyun/items/properties/id",
- "type":"string",
- "title":"ImageID",
- "default":"",
- "minLength": 1
- }
- }
- }
- },
- "amis": {
- "$id":"#/properties/amis",
- "type":"array",
- "title":"AMIS",
- "items": {
- "$id":"#/properties/amis/items",
- "type":"object",
- "title":"AMIS",
- "required": [
- "name",
- "hvm",
- "snapshot"
- ],
- "properties": {
- "name": {
- "$id":"#/properties/amis/items/properties/name",
- "type":"string",
- "title":"Region",
- "default":""
- },
- "hvm": {
- "$id":"#/properties/amis/items/properties/hvm",
- "type":"string",
- "title":"HVM",
- "default":""
- },
- "snapshot": {
- "$id":"#/properties/amis/items/properties/snapshot",
- "type":"string",
- "title":"Snapshot",
- "default":""
- }
- }
- }
- },
- "azure": {
- "$id":"#/properties/azure",
- "type":"object",
- "title":"Azure",
- "$ref": "#/definitions/cloudartifact"
- },
- "gcp": {
- "$id":"#/properties/gcp",
- "type":"object",
- "title":"GCP",
- "required": [
- "image",
- "url"
- ],
- "optional": [
- "family",
- "project"
- ],
- "properties": {
- "image": {
- "$id":"#/properties/gcp/image",
- "type":"string",
- "title":"Image Name"
- },
- "url": {
- "$id":"#/properties/gcp/url",
- "type":"string",
- "title":"URL"
- },
- "project": {
- "$id":"#/properties/gcp/project",
- "type":"string",
- "title":"Image Project"
- },
- "family": {
- "$id":"#/properties/gcp/family",
- "type":"string",
- "title":"Image Family"
- }
- }
- },
- "release-payload": {
- "$id":"#/properties/release-payload",
- "type":"object",
- "title":"ReleasePayload",
- "$ref": "#/definitions/image"
- }
- }
-}
-`
-```
-
-## func BuilderArch
-
-```go
-func BuilderArch() string
-```
-
-BuilderArch converts the GOARCH to the build arch\. In other words\, it translates amd64 to x86\_64\.
-
-## func CanArtifact
-
-```go
-func CanArtifact(artifact string) bool
-```
-
-CanArtifact reports whether an artifact name is buildable by COSA based on the meta\.json name\. CanArtifact is used to signal if the artifact is a known artifact type\.
-
-## func GetCommandBuildableArtifacts
-
-```go
-func GetCommandBuildableArtifacts() []string
-```
-
-GetCommandBuildableArtifacts returns the string name of buildable artifacts through the \`cosa build\-\*\` CLI\.
-
-## func IsMetaJSON
-
-```go
-func IsMetaJSON(path string) bool
-```
-
-IsMetaJSON is a helper for identifying if a file is meta\.json
-
-## func Open
-
-```go
-func Open(p string) (io.ReadCloser, error)
-```
-
-Open calls the backend's open function\.
-
-## func SetArch
-
-```go
-func SetArch(a string)
-```
-
-SetArch overrides the build arch
-
-## func SetIOBackendFile
-
-```go
-func SetIOBackendFile()
-```
-
-SetIOBackendFile sets the backend to the default file backend\.
-
-## func SetIOBackendMinio
-
-```go
-func SetIOBackendMinio(ctx context.Context, m *minio.Client) error
-```
-
-SetIOBackendMinio sets the backend to minio\. The client must be provided by the caller\, including authorization\.
-
-## func SetSchemaFromFile
-
-```go
-func SetSchemaFromFile(r io.Reader) error
-```
-
-SetSchemaFromFile sets the validation JSON Schema
-
-## func defaultWalkFunc
-
-```go
-func defaultWalkFunc(p string) <-chan fileInfo
-```
-
-defaultWalkFunc walks over a directory and returns a channel of os\.FileInfo
-
-## func init
-
-```go
-func init()
-```
-
-## type AdvisoryDiff
-
-```go
-type AdvisoryDiff []AdvisoryDiffItems
-```
-
-## type AdvisoryDiffItems
-
-```go
-type AdvisoryDiffItems interface{}
-```
-
-## type AliyunImage
-
-```go
-type AliyunImage struct {
- ImageID string `json:"id"`
- Region string `json:"name"`
-}
-```
-
-## type Amis
-
-```go
-type Amis struct {
- Hvm string `json:"hvm"`
- Region string `json:"name"`
- Snapshot string `json:"snapshot"`
-}
-```
-
-## type Artifact
-
-```go
-type Artifact struct {
- Path string `json:"path"`
- Sha256 string `json:"sha256"`
- SizeInBytes float64 `json:"size,omitempty"`
- UncompressedSha256 string `json:"uncompressed-sha256,omitempty"`
- UncompressedSize int `json:"uncompressed-size,omitempty"`
-}
-```
-
-## type Build
-
-```go
-type Build struct {
- AdvisoryDiffAgainstParent AdvisoryDiff `json:"parent-advisories-diff,omitempty"`
- AdvisoryDiffBetweenBuilds AdvisoryDiff `json:"advisories-diff,omitempty"`
- AlibabaAliyunUploads []AliyunImage `json:"aliyun,omitempty"`
- Amis []Amis `json:"amis,omitempty"`
- Architecture string `json:"coreos-assembler.basearch,omitempty"`
- Azure *Cloudartifact `json:"azure,omitempty"`
- BuildArtifacts *BuildArtifacts `json:"images,omitempty"`
- BuildID string `json:"buildid"`
- BuildRef string `json:"ref,omitempty"`
- BuildSummary string `json:"summary"`
- BuildTimeStamp string `json:"coreos-assembler.build-timestamp,omitempty"`
- BuildURL string `json:"build-url,omitempty"`
- ConfigGitRev string `json:"coreos-assembler.config-gitrev,omitempty"`
- ContainerConfigGit *Git `json:"coreos-assembler.container-config-git,omitempty"`
- CoreOsSource string `json:"coreos-assembler.code-source,omitempty"`
- CosaContainerImageGit *Git `json:"coreos-assembler.container-image-git,omitempty"`
- CosaDelayedMetaMerge bool `json:"coreos-assembler.delayed-meta-merge,omitempty"`
- CosaImageChecksum string `json:"coreos-assembler.image-config-checksum,omitempty"`
- CosaImageVersion int `json:"coreos-assembler.image-genver,omitempty"`
- Extensions *Extensions `json:"extensions,omitempty"`
- FedoraCoreOsParentCommit string `json:"fedora-coreos.parent-commit,omitempty"`
- FedoraCoreOsParentVersion string `json:"fedora-coreos.parent-version,omitempty"`
- Gcp *Gcp `json:"gcp,omitempty"`
- GitDirty string `json:"coreos-assembler.config-dirty,omitempty"`
- ImageInputChecksum string `json:"coreos-assembler.image-input-checksum,omitempty"`
- InputHasOfTheRpmOstree string `json:"rpm-ostree-inputhash"`
- Koji *Koji `json:"koji,omitempty"`
- MetaStamp float64 `json:"coreos-assembler.meta-stamp,omitempty"`
- Name string `json:"name"`
- Oscontainer *Image `json:"oscontainer,omitempty"`
- OstreeCommit string `json:"ostree-commit"`
- OstreeContentBytesWritten int `json:"ostree-content-bytes-written,omitempty"`
- OstreeContentChecksum string `json:"ostree-content-checksum"`
- OstreeNCacheHits int `json:"ostree-n-cache-hits,omitempty"`
- OstreeNContentTotal int `json:"ostree-n-content-total,omitempty"`
- OstreeNContentWritten int `json:"ostree-n-content-written,omitempty"`
- OstreeNMetadataTotal int `json:"ostree-n-metadata-total,omitempty"`
- OstreeNMetadataWritten int `json:"ostree-n-metadata-written,omitempty"`
- OstreeTimestamp string `json:"ostree-timestamp"`
- OstreeVersion string `json:"ostree-version"`
- OverridesActive bool `json:"coreos-assembler.overrides-active,omitempty"`
- PkgdiffAgainstParent PackageSetDifferences `json:"parent-pkgdiff,omitempty"`
- PkgdiffBetweenBuilds PackageSetDifferences `json:"pkgdiff,omitempty"`
- ReleasePayload *Image `json:"release-payload,omitempty"`
-}
-```
-
-### func ParseBuild
-
-```go
-func ParseBuild(path string) (*Build, error)
-```
-
-ParseBuild parses the meta\.json and reutrns a build
-
-### func ReadBuild
-
-```go
-func ReadBuild(dir, buildID, arch string) (*Build, string, error)
-```
-
-ReadBuild returns a build upon finding a meta\.json\. Returns a Build\, the path string to the build\, and an error \(if any\)\. If the buildID is not set\, "latest" is assumed\.
-
-### func buildParser
-
-```go
-func buildParser(r io.Reader) (*Build, error)
-```
-
-### func \(\*Build\) GetArtifact
-
-```go
-func (build *Build) GetArtifact(artifact string) (*Artifact, error)
-```
-
-GetArtifact returns an artifact by JSON tag
-
-### func \(\*Build\) IsArtifact
-
-```go
-func (build *Build) IsArtifact(path string) (string, bool)
-```
-
-IsArtifact takes a path and returns the artifact type and a bool if the artifact is described in the build\.
-
-### func \(\*Build\) Validate
-
-```go
-func (build *Build) Validate() []error
-```
-
-Validate checks the build against the schema\.
-
-### func \(\*Build\) WriteMeta
-
-```go
-func (build *Build) WriteMeta(path string, validate bool) error
-```
-
-WriteMeta records the meta\-data\. Writes are local only\.
-
-### func \(\*Build\) artifacts
-
-```go
-func (build *Build) artifacts() map[string]*Artifact
-```
-
-artifact returns a string map of artifacts\, where the key is the JSON tag\. Reflection was over a case statement to make meta\.json and the schema authoritative for adding and removing artifacts\.
-
-### func \(\*Build\) mergeMeta
-
-```go
-func (b *Build) mergeMeta(r io.Reader) error
-```
-
-mergeMeta uses JSON to merge in the data
-
-## type BuildArtifacts
-
-```go
-type BuildArtifacts struct {
- Aliyun *Artifact `json:"aliyun,omitempty"`
- Aws *Artifact `json:"aws,omitempty"`
- Azure *Artifact `json:"azure,omitempty"`
- AzureStack *Artifact `json:"azurestack,omitempty"`
- Dasd *Artifact `json:"dasd,omitempty"`
- DigitalOcean *Artifact `json:"digitalocean,omitempty"`
- Exoscale *Artifact `json:"exoscale,omitempty"`
- Gcp *Artifact `json:"gcp,omitempty"`
- IbmCloud *Artifact `json:"ibmcloud,omitempty"`
- Initramfs *Artifact `json:"initramfs,omitempty"`
- Iso *Artifact `json:"iso,omitempty"`
- Kernel *Artifact `json:"kernel,omitempty"`
- LiveInitramfs *Artifact `json:"live-initramfs,omitempty"`
- LiveIso *Artifact `json:"live-iso,omitempty"`
- LiveKernel *Artifact `json:"live-kernel,omitempty"`
- LiveRootfs *Artifact `json:"live-rootfs,omitempty"`
- Metal *Artifact `json:"metal,omitempty"`
- Metal4KNative *Artifact `json:"metal4k,omitempty"`
- OpenStack *Artifact `json:"openstack,omitempty"`
- Ostree Artifact `json:"ostree"`
- Qemu *Artifact `json:"qemu,omitempty"`
- Vmware *Artifact `json:"vmware,omitempty"`
- Vultr *Artifact `json:"vultr,omitempty"`
-}
-```
-
-## type Cloudartifact
-
-```go
-type Cloudartifact struct {
- Image string `json:"image"`
- URL string `json:"url"`
-}
-```
-
-## type Extensions
-
-```go
-type Extensions struct {
- Manifest map[string]interface{} `json:"manifest"`
- Path string `json:"path"`
- RpmOstreeState string `json:"rpm-ostree-state"`
- Sha256 string `json:"sha256"`
-}
-```
-
-### func \(\*Extensions\) toArtifact
-
-```go
-func (e *Extensions) toArtifact() *Artifact
-```
-
-toArtifact converts an Extension to an Artifact
-
-## type Gcp
-
-```go
-type Gcp struct {
- ImageFamily string `json:"family,omitempty"`
- ImageName string `json:"image"`
- ImageProject string `json:"project,omitempty"`
- URL string `json:"url"`
-}
-```
-
-## type Git
-
-```go
-type Git struct {
- Branch string `json:"branch,omitempty"`
- Commit string `json:"commit"`
- Dirty string `json:"dirty,omitempty"`
- Origin string `json:"origin"`
-}
-```
-
-## type Image
-
-```go
-type Image struct {
- Comment string `json:"comment,omitempty"`
- Digest string `json:"digest"`
- Image string `json:"image"`
-}
-```
-
-## type Koji
-
-```go
-type Koji struct {
- BuildRelease string `json:"release,omitempty"`
- KojiBuildID float64 `json:"build_id,omitempty"`
- KojiToken string `json:"token,omitempty"`
-}
-```
-
-## type PackageSetDifferences
-
-```go
-type PackageSetDifferences []PackageSetDifferencesItems
-```
-
-## type PackageSetDifferencesItems
-
-```go
-type PackageSetDifferencesItems interface{}
-```
-
-## type build
-
-build represents the build struct in a buildJSON
-
-```go
-type build struct {
- ID string `json:"id"`
- Arches []string `json:"arches"`
-}
-```
-
-## type buildsJSON
-
-buildsJSON represents the JSON that records the builds TODO: this should be generated by a schema
-
-```go
-type buildsJSON struct {
- SchemaVersion string `json:"schema-version"`
- Builds []build `json:"builds"`
- TimeStamp string `json:"timestamp"`
-}
-```
-
-### func getBuilds
-
-```go
-func getBuilds(dir string) (*buildsJSON, error)
-```
-
-### func \(\*buildsJSON\) getLatest
-
-```go
-func (b *buildsJSON) getLatest(arch string) (string, bool)
-```
-
-getLatest returns the latest build for the arch\.
-
-## type fileInfo
-
-```go
-type fileInfo interface {
- Name() string // base name of the file
- Size() int64 // length in bytes for regular files; system-dependent for others
- Mode() fileMode // file mode bits
- ModTime() time.Time // modification time
- IsDir() bool // abbreviation for Mode().IsDir()
- Sys() interface{} // underlying data source (can return nil)
-}
-```
-
-objectInfo implements the os\.FileInfo interface\. This allows for abstracting any file or object to be compared as if they were local files regardless of location\.
-
-```go
-var _ fileInfo = &objectInfo{}
-```
-
-## type fileMode
-
-TODO: drop with GoLang 1\.16\. This is a backport of the interface from 1\.16\. var \_ os\.FileInfo = &objectInfo\{\}
-
-```go
-type fileMode uint32
-```
-
-## type ioBackendFile
-
-ioBackendFile is a file based backend
-
-```go
-type ioBackendFile struct {
- *os.File
- path string
-}
-```
-
-### func \(\*ioBackendFile\) Name
-
-```go
-func (i *ioBackendFile) Name() string
-```
-
-### func \(\*ioBackendFile\) Open
-
-```go
-func (i *ioBackendFile) Open(p string) (io.ReadCloser, error)
-```
-
-Open implements ioBackender Open interface\.
-
-## type ioBackendMinio
-
-ioBackendMinio is a minio based backend
-
-```go
-type ioBackendMinio struct {
- ctx context.Context
- m *minio.Client
- obj *minio.Object
- name string
-}
-```
-
-### func \(\*ioBackendMinio\) Open
-
-```go
-func (im *ioBackendMinio) Open(p string) (io.ReadCloser, error)
-```
-
-Open implements ioBackender's and os\.File's Open interface\.
-
-## type ioBackender
-
-ioBackender is the basic interface\.
-
-```go
-type ioBackender interface {
- Open(string) (io.ReadCloser, error)
-}
-```
-
-ioBackendMinio is an ioBackender\.
-
-```go
-var _ ioBackender = &ioBackendMinio{}
-```
-
-default ioBackend is file backend
-
-```go
-var ioBackend ioBackender = new(ioBackendFile)
-```
-
-### func newBackend
-
-```go
-func newBackend() ioBackender
-```
-
-newBackend returns a new backend
-
-## type objectInfo
-
-objectInfo holds basic information about either a file object or a remote minio object\.
-
-```go
-type objectInfo struct {
- info minio.ObjectInfo
- name string
-}
-```
-
-### func \(\*objectInfo\) IsDir
-
-```go
-func (ao *objectInfo) IsDir() bool
-```
-
-IsDir implements the os\.FileInfo IsDir func\. For minio objects\, the answer is always false\.
-
-### func \(\*objectInfo\) ModTime
-
-```go
-func (ao *objectInfo) ModTime() time.Time
-```
-
-ModTime implements the os\.FileInfo ModTime func\. The returned value is remote aodification time\.
-
-### func \(\*objectInfo\) Mode
-
-```go
-func (ao *objectInfo) Mode() fileMode
-```
-
-Mode implements the os\.FileInfo Mode func\. Since there is no simple way to convert an ACL into Unix permissions\, it blindly returns 0644\.
-
-### func \(\*objectInfo\) Name
-
-```go
-func (ao *objectInfo) Name() string
-```
-
-Name implements the os\.FileInfo interface Name func\.
-
-### func \(\*objectInfo\) Size
-
-```go
-func (ao *objectInfo) Size() int64
-```
-
-Size implements the os\.FileInfo size func\.
-
-### func \(\*objectInfo\) Sys
-
-```go
-func (ao *objectInfo) Sys() interface{}
-```
-
-Sys implements the os\.FileInfo interface Sys func\. The interface spec allows for returning a nil\.
-
-## type walkerFn
-
-walkerFn is a function that implements the walk func
-
-```go
-type walkerFn func(string) <-chan fileInfo
-```
-
-walkFn is used to walk paths
-
-```go
-var walkFn walkerFn = defaultWalkFunc
-```
-
-### func createMinioWalkFunc
-
-```go
-func createMinioWalkFunc(m *minio.Client) walkerFn
-```
-
-createMinioWalkFunc creates a new walk func from a minio client\. The returned function will list the remote objects and return os\.FileInfo compliant interfaces\.
-
-
-
-Generated by [gomarkdoc]()
diff --git a/docs/gangplank/api-ocp.md b/docs/gangplank/api-ocp.md
deleted file mode 100755
index c7789b2e1e78e572e31c835bd3c241cf4ce96b3e..0000000000000000000000000000000000000000
--- a/docs/gangplank/api-ocp.md
+++ /dev/null
@@ -1,1876 +0,0 @@
-
-
-# ocp
-
-```go
-import "github.com/coreos/gangplank/ocp"
-```
-
-## Index
-
-- [Constants](<#constants>)
-- [Variables](<#variables>)
-- [func GetClient(ctx ClusterContext) (*kubernetes.Clientset, string, error)](<#func-getclient>)
-- [func checkPort(port int) error](<#func-checkport>)
-- [func clusterRunner(term termChan, cp CosaPodder, envVars []v1.EnvVar) error](<#func-clusterrunner>)
-- [func copyFile(src, dest string) error](<#func-copyfile>)
-- [func cosaInit() error](<#func-cosainit>)
-- [func decompress(in io.ReadCloser, dir string) error](<#func-decompress>)
-- [func getHostname() string](<#func-gethostname>)
-- [func getLocalFileStamp(path string) (int64, error)](<#func-getlocalfilestamp>)
-- [func getNetIP() (string, error)](<#func-getnetip>)
-- [func getPodIP(cs *kubernetes.Clientset, podNamespace, podName string) (string, error)](<#func-getpodip>)
-- [func getPortOrNext(port int) int](<#func-getportornext>)
-- [func getPushTagless(s string) (string, string)](<#func-getpushtagless>)
-- [func init()](<#func-init>)
-- [func isKnownBuildMeta(n string) bool](<#func-isknownbuildmeta>)
-- [func k8sInClusterClient() (*kubernetes.Clientset, string, error)](<#func-k8sinclusterclient>)
-- [func kubernetesSecretsSetup(ac *kubernetes.Clientset, ns, toDir string) ([]string, error)](<#func-kubernetessecretssetup>)
-- [func mustHaveImage(ctx context.Context, image string) error](<#func-musthaveimage>)
-- [func newFileStamp() string](<#func-newfilestamp>)
-- [func ocpBuildClient() error](<#func-ocpbuildclient>)
-- [func podmanRunner(term termChan, cp CosaPodder, envVars []v1.EnvVar) error](<#func-podmanrunner>)
-- [func ptrBool(b bool) *bool](<#func-ptrbool>)
-- [func ptrInt(i int64) *int64](<#func-ptrint>)
-- [func ptrInt32(i int32) *int32](<#func-ptrint32>)
-- [func pushOstreeToRegistry(ctx ClusterContext, push *spec.Registry, build *cosa.Build) error](<#func-pushostreetoregistry>)
-- [func randomString(n int) (string, error)](<#func-randomstring>)
-- [func recieveInputBinary() (string, error)](<#func-recieveinputbinary>)
-- [func sshForwarder(ctx context.Context, cfg *SSHForwardPort) (chan<- bool, error)](<#func-sshforwarder>)
-- [func streamPodLogs(client *kubernetes.Clientset, namespace string, pod *v1.Pod, term termChan) error](<#func-streampodlogs>)
-- [func tokenRegistryLogin(ctx ClusterContext, tlsVerify *bool, registry string) error](<#func-tokenregistrylogin>)
-- [func uploadCustomBuildContainer(ctx ClusterContext, tlsVerify *bool, apiBuild *buildapiv1.Build, build *cosa.Build) error](<#func-uploadcustombuildcontainer>)
-- [func uploadPathAsTarBall(ctx context.Context, bucket, object, path, workDir string, sudo bool, r *Return) error](<#func-uploadpathastarball>)
-- [func writeDockerSecret(ctx ClusterContext, clusterSecretName, authPath string) error](<#func-writedockersecret>)
-- [func writeToWriters(l *log.Entry, in io.ReadCloser, outs ...io.Writer) <-chan error](<#func-writetowriters>)
-- [type Builder](<#type-builder>)
- - [func NewBuilder(ctx ClusterContext) (Builder, error)](<#func-newbuilder>)
-- [type Cluster](<#type-cluster>)
- - [func GetCluster(ctx ClusterContext) (*Cluster, error)](<#func-getcluster>)
- - [func (c *Cluster) GetStdIO() (*os.File, *os.File, *os.File)](<#func-cluster-getstdio>)
- - [func (c *Cluster) SetPodman(srvDir string)](<#func-cluster-setpodman>)
- - [func (c *Cluster) SetRemoteCluster(kubeConfig, namespace string)](<#func-cluster-setremotecluster>)
- - [func (c *Cluster) SetStdIO(stdIn, stdOut, stdErr *os.File)](<#func-cluster-setstdio>)
- - [func (c *Cluster) toKubernetesCluster() *KubernetesCluster](<#func-cluster-tokubernetescluster>)
-- [type ClusterContext](<#type-clustercontext>)
- - [func NewClusterContext(ctx context.Context, kc KubernetesCluster) ClusterContext](<#func-newclustercontext>)
-- [type CosaPodder](<#type-cosapodder>)
- - [func NewCosaPodder(
- ctx ClusterContext,
- apiBuild *buildapiv1.Build,
- index int) (CosaPodder, error)](<#func-newcosapodder>)
- - [func NewHopPod(ctx ClusterContext, image, serviceAccount, workDir string, js *spec.JobSpec) CosaPodder](<#func-newhoppod>)
-- [type KubernetesCluster](<#type-kubernetescluster>)
- - [func NewCluster(inCluster bool) KubernetesCluster](<#func-newcluster>)
-- [type PodBuilder](<#type-podbuilder>)
- - [func NewPodBuilder(ctx ClusterContext, image, serviceAccount, workDir string, js *spec.JobSpec) (PodBuilder, error)](<#func-newpodbuilder>)
-- [type RemoteFile](<#type-remotefile>)
- - [func getBuildMeta(jsonPath, keyPathBase string, m *minioServer, l *log.Entry) []*RemoteFile](<#func-getbuildmeta>)
- - [func getStageFiles(buildID string,
- l *log.Entry, m *minioServer, lastBuild *cosa.Build, s *spec.Stage) (*cosa.Build, []*RemoteFile, error)](<#func-getstagefiles>)
- - [func (r *RemoteFile) Extract(ctx context.Context, path string) error](<#func-remotefile-extract>)
- - [func (r *RemoteFile) WriteToPath(ctx context.Context, path string) error](<#func-remotefile-writetopath>)
-- [type Return](<#type-return>)
- - [func (r *Return) Run(ctx context.Context, ws *workSpec) error](<#func-return-run>)
-- [type Returner](<#type-returner>)
-- [type SSHForwardPort](<#type-sshforwardport>)
- - [func getSshMinioForwarder(j *spec.JobSpec) *SSHForwardPort](<#func-getsshminioforwarder>)
-- [type SecretMapper](<#type-secretmapper>)
-- [type buildConfig](<#type-buildconfig>)
- - [func newBC(ctx context.Context, c *Cluster) (*buildConfig, error)](<#func-newbc>)
- - [func (bc *buildConfig) Exec(ctx ClusterContext) (err error)](<#func-buildconfig-exec>)
- - [func (bc *buildConfig) discoverStages(m *minioServer) ([]*RemoteFile, error)](<#func-buildconfig-discoverstages>)
- - [func (bc *buildConfig) ocpBinaryInput(m *minioServer) ([]*RemoteFile, error)](<#func-buildconfig-ocpbinaryinput>)
-- [type byteFields](<#type-bytefields>)
-- [type clusterCtxKey](<#type-clusterctxkey>)
-- [type consoleLogWriter](<#type-consolelogwriter>)
- - [func newConsoleLogWriter(prefix string) *consoleLogWriter](<#func-newconsolelogwriter>)
- - [func (cw *consoleLogWriter) Write(b []byte) (int, error)](<#func-consolelogwriter-write>)
-- [type cosaPod](<#type-cosapod>)
- - [func (cp *cosaPod) GetClusterCtx() ClusterContext](<#func-cosapod-getclusterctx>)
- - [func (cp *cosaPod) WorkerRunner(term termChan, envVars []v1.EnvVar) error](<#func-cosapod-workerrunner>)
- - [func (cp *cosaPod) addVolumeFromObjectLabel(obj metav1.Object, fields stringFields) error](<#func-cosapod-addvolumefromobjectlabel>)
- - [func (cp *cosaPod) addVolumesFromConfigMapLabels() error](<#func-cosapod-addvolumesfromconfigmaplabels>)
- - [func (cp *cosaPod) addVolumesFromSecretLabels() error](<#func-cosapod-addvolumesfromsecretlabels>)
- - [func (cp *cosaPod) getPodSpec(envVars []v1.EnvVar) (*v1.Pod, error)](<#func-cosapod-getpodspec>)
-- [type hopPod](<#type-hoppod>)
- - [func (h *hopPod) GetClusterCtx() ClusterContext](<#func-hoppod-getclusterctx>)
- - [func (h *hopPod) WorkerRunner(term termChan, _ []v1.EnvVar) error](<#func-hoppod-workerrunner>)
- - [func (h *hopPod) getPodSpec([]v1.EnvVar) (*v1.Pod, error)](<#func-hoppod-getpodspec>)
-- [type minioServer](<#type-minioserver>)
- - [func StartStandaloneMinioServer(ctx context.Context, srvDir, cfgFile string, overSSH *SSHForwardPort) (*minioServer, error)](<#func-startstandaloneminioserver>)
- - [func minioCfgFromFile(f string) (mk minioServer, err error)](<#func-miniocfgfromfile>)
- - [func minioCfgReader(in io.Reader) (m minioServer, err error)](<#func-miniocfgreader>)
- - [func newMinioServer(cfgFile string) *minioServer](<#func-newminioserver>)
- - [func (m *minioServer) Exists(bucket, object string) bool](<#func-minioserver-exists>)
- - [func (m *minioServer) Kill()](<#func-minioserver-kill>)
- - [func (m *minioServer) Wait()](<#func-minioserver-wait>)
- - [func (m *minioServer) WriteJSON(w io.Writer) error](<#func-minioserver-writejson>)
- - [func (m *minioServer) WriteToFile(f string) error](<#func-minioserver-writetofile>)
- - [func (m *minioServer) client() (*minio.Client, error)](<#func-minioserver-client>)
- - [func (m *minioServer) ensureBucketExists(ctx context.Context, bucket string) error](<#func-minioserver-ensurebucketexists>)
- - [func (m *minioServer) exec(ctx context.Context) error](<#func-minioserver-exec>)
- - [func (m *minioServer) fetcher(ctx context.Context, bucket, object string, dest io.Writer) error](<#func-minioserver-fetcher>)
- - [func (m *minioServer) getStamp(bucket, object string) (int64, error)](<#func-minioserver-getstamp>)
- - [func (m *minioServer) isLocalNewer(bucket, object string, path string) (bool, error)](<#func-minioserver-islocalnewer>)
- - [func (m *minioServer) putter(ctx context.Context, bucket, object, fpath string) error](<#func-minioserver-putter>)
- - [func (m *minioServer) stampFile(bucket, object string) error](<#func-minioserver-stampfile>)
- - [func (m *minioServer) start(ctx context.Context) error](<#func-minioserver-start>)
-- [type mountReferance](<#type-mountreferance>)
-- [type outWriteCloser](<#type-outwritecloser>)
- - [func newNoopFileWriterCloser(f *os.File) *outWriteCloser](<#func-newnoopfilewritercloser>)
- - [func (o *outWriteCloser) Close() error](<#func-outwritecloser-close>)
-- [type podBuild](<#type-podbuild>)
- - [func (pb *podBuild) Exec(ctx ClusterContext) error](<#func-podbuild-exec>)
- - [func (pb *podBuild) encodeAPIBuild() (string, error)](<#func-podbuild-encodeapibuild>)
- - [func (pb *podBuild) generateAPIBuild() error](<#func-podbuild-generateapibuild>)
- - [func (pb *podBuild) setInCluster() error](<#func-podbuild-setincluster>)
-- [type podmanRunnerFunc](<#type-podmanrunnerfunc>)
-- [type secretMap](<#type-secretmap>)
- - [func getSecretMapping(s string) (*secretMap, bool)](<#func-getsecretmapping>)
- - [func (sm *secretMap) writeSecretEnvVars(d map[string][]byte, ret *[]string) error](<#func-secretmap-writesecretenvvars>)
- - [func (sm *secretMap) writeSecretFiles(toDir, name string, d map[string][]byte, ret *[]string) error](<#func-secretmap-writesecretfiles>)
-- [type stringFields](<#type-stringfields>)
- - [func toStringFields(bf byteFields) stringFields](<#func-tostringfields>)
-- [type termChan](<#type-termchan>)
-- [type varMap](<#type-varmap>)
-- [type workSpec](<#type-workspec>)
- - [func newWorkSpec(ctx ClusterContext) (*workSpec, error)](<#func-newworkspec>)
- - [func (ws *workSpec) Exec(ctx ClusterContext) error](<#func-workspec-exec>)
- - [func (ws *workSpec) Marshal() ([]byte, error)](<#func-workspec-marshal>)
- - [func (ws *workSpec) Unmarshal(r io.Reader) error](<#func-workspec-unmarshal>)
- - [func (ws *workSpec) getEnvVars() ([]v1.EnvVar, error)](<#func-workspec-getenvvars>)
-
-
-## Constants
-
-```go
-const (
- // ocpStructTag is the struct tag used to read in
- // OCPBuilder from envvars
- ocpStructTag = "envVar"
-
-	// defaultContextDir is the default path to use for a build
- defaultContextDir = "/srv"
-
- // secretLabelName is the label to search for secrets to automatically use
- secretLabelName = "coreos-assembler.coreos.com/secret"
-
- // cosaSrvCache is the location of the cache files
- cosaSrvCache = "/srv/cache"
-
- // cosaSrvTmpRepo is the location the repo files
- cosaSrvTmpRepo = "/srv/tmp/repo"
-
- // cacheTarballName is the name of the file used when Stage.{Require,Return}Cache is true
- cacheTarballName = "cache.tar.gz"
-
- // cacheRepoTarballName is the name of the file used when Stage.{Require,Return}RepoCache is true
- cacheRepoTarballName = "repo.tar.gz"
-
- // cacheBucket is used for storing the cache
- cacheBucket = "cache"
-)
-```
-
-```go
-const (
- kvmLabel = "devices.kubevirt.io/kvm"
- localPodEnvVar = "COSA_FORCE_NO_CLUSTER"
-)
-```
-
-```go
-const (
- envVarSourceURI = "SOURCE_URI"
- envVarSourceRef = "SOURCE_REF"
-)
-```
-
-```go
-const (
- clusterNamespaceFile = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
- serviceAccountTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-)
-```
-
-```go
-const (
- podBuildLabel = "gangplank.coreos-assembler.coreos.com"
- podBuildAnnotation = podBuildLabel + "-%s"
- podBuildRunnerTag = "cosa-podBuild-runner"
-)
-```
-
-```go
-const (
- // sourceBin stores binary input
- sourceBin = "source.bin"
-
- // sourceSubPath is used when extracting binary inputs
- sourceSubPath = "source"
-)
-```
-
-CosaWorkPodEnvVarName is the envVar used to identify WorkSpec json\.
-
-```go
-const CosaWorkPodEnvVarName = "COSA_WORK_POD_JSON"
-```
-
-```go
-const fileStampName = "gangplank.coreos.com/cosa/stamp"
-```
-
-mountRefLabel is used for mounting of secrets\.
-
-```go
-const mountRefLabel = "coreos-assembler.coreos.com/mount-ref"
-```
-
-podmanContainerHostEnvVar is used by both Gangplank and the podman API to decide if the execution of the pod should happen over SSH\.
-
-```go
-const podmanContainerHostEnvVar = "CONTAINER_HOST"
-```
-
-## Variables
-
-```go
-var (
- gangwayCmd = "/usr/bin/gangway"
-
- // volumes are the volumes used in all pods created
- volumes = []v1.Volume{
- {
- Name: "srv",
- VolumeSource: v1.VolumeSource{
- EmptyDir: &v1.EmptyDirVolumeSource{
- Medium: "",
- },
- },
- },
- {
- Name: "pki-trust",
- VolumeSource: v1.VolumeSource{
- EmptyDir: &v1.EmptyDirVolumeSource{
- Medium: "",
- },
- },
- },
- {
- Name: "pki-anchors",
- VolumeSource: v1.VolumeSource{
- EmptyDir: &v1.EmptyDirVolumeSource{
- Medium: "",
- },
- },
- },
- {
- Name: "container-certs",
- VolumeSource: v1.VolumeSource{
- EmptyDir: &v1.EmptyDirVolumeSource{
- Medium: "",
- },
- },
- },
- }
-
- // volumeMounts are the common mounts used in all pods
- volumeMounts = []v1.VolumeMount{
- {
- Name: "srv",
- MountPath: "/srv",
- },
- {
- Name: "pki-trust",
- MountPath: "/etc/pki/ca-trust/extracted",
- },
- {
- Name: "pki-anchors",
- MountPath: "/etc/pki/ca-trust/anchors",
- },
- {
- Name: "container-certs",
- MountPath: "/etc/containers/cert.d",
- },
- }
-
- // Define basic envVars
- ocpEnvVars = []v1.EnvVar{
- {
- Name: "SSL_CERT_FILE",
- Value: "/etc/containers/cert.d/ca.crt",
- },
- {
- Name: "OSCONTAINER_CERT_DIR",
- Value: "/etc/containers/cert.d",
- },
- }
-
-	// Define the Security Contexts
- ocpSecContext = &v1.SecurityContext{}
-
- // On OpenShift 3.x, we require privileges.
- ocp3SecContext = &v1.SecurityContext{
- RunAsUser: ptrInt(0),
- RunAsGroup: ptrInt(1000),
- Privileged: ptrBool(true),
- }
-
- // InitCommands to be run before work in pod is executed.
- ocpInitCommand = []string{
- "mkdir -vp /etc/pki/ca-trust/extracted/{openssl,pem,java,edk2}",
-
- "cp -av /etc/pki/ca-trust/source/anchors2/*{crt,pem} /etc/pki/ca-trust/anchors/ || :",
-
- "cp -av /run/secrets/kubernetes.io/serviceaccount/ca.crt /etc/pki/ca-trust/anchors/cluster-ca.crt || :",
- "cp -av /run/secrets/kubernetes.io/serviceaccount/service-ca.crt /etc/pki/ca-trust/anchors/service-ca.crt || :",
-
- "update-ca-trust",
-
- "mkdir -vp /etc/containers/certs.d",
- "cat /run/secrets/kubernetes.io/serviceaccount/*crt >> /etc/containers/certs.d/ca.crt || :",
- "cat /etc/pki/ca-trust/extracted/pem/* >> /etc/containers/certs.d/ca.crt ||:",
- }
-
- // On OpenShift 3.x, /dev/kvm is unlikely to world RW. So we have to give ourselves
- // permission. Gangplank will run as root but `cosa` commands run as the builder
- // user. Note: on 4.x, gangplank will run unprivileged.
- ocp3InitCommand = append(ocpInitCommand,
- "/usr/bin/chmod 0666 /dev/kvm || echo missing kvm",
- "/usr/bin/stat /dev/kvm || :",
- )
-
- // Define the base requirements
-	// CPU is in whole cores, memory is in bytes (4 GiB)
- baseCPU = *resource.NewQuantity(2, "")
- baseMem = *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI)
-
- ocp3Requirements = v1.ResourceList{
- v1.ResourceCPU: baseCPU,
- v1.ResourceMemory: baseMem,
- }
-
- ocpRequirements = v1.ResourceList{
- v1.ResourceCPU: baseCPU,
- v1.ResourceMemory: baseMem,
- kvmLabel: *resource.NewQuantity(1, ""),
- }
-)
-```
-
-```go
-var (
- // ErrNoSuchCloud is returned when the cloud is unknown
- ErrNoSuchCloud = errors.New("unknown cloud credential type")
-
- // ErrNoOCPBuildSpec is raised when no OCP envvars are found
- ErrNoOCPBuildSpec = errors.New("no OCP Build specification found")
-
-	// ErrNotInCluster is used to signal that the host is not running in a
- // Kubernetes cluster
- ErrNotInCluster = errors.New("host is not in kubernetes cluster")
-
-	// ErrInvalidOCPMode is used when there is no valid/supported mode for the OCP
-	// package. Currently this is thrown when neither a build client nor a kubernetes API
-	// client can be initialized.
- ErrInvalidOCPMode = errors.New("program is not running as a buildconfig or with valid kubernetes service account")
-
- // ErrNoSourceInput is used to signal no source found.
- ErrNoSourceInput = errors.New("no source repo or binary payload defined")
-
- // ErrNotWorkPod is returned when the pod is not a work pod
- ErrNotWorkPod = errors.New("not a work pod")
-
- // ErrNoWorkFound is returned when the build client is neither a
-	// workPod nor a BuildConfig.
- ErrNoWorkFound = errors.New("neither a buildconfig or workspec found")
-)
-```
-
-```go
-var (
- // These are used to parse the OpenShift API
- buildScheme = runtime.NewScheme()
- buildCodecFactory = serializer.NewCodecFactory(buildScheme)
- buildJSONCodec runtime.Codec
-
- // API Client for OpenShift builds.
- apiBuild *buildapiv1.Build
-)
-```
-
-```go
-var (
-	// sudoBashCmd is used for shelling out to commands.
- sudoBashCmd = []string{"sudo", "bash", "-c"}
-
- // bashCmd is used for shelling out to commands
- bashCmd = []string{"bash", "-c"}
-)
-```
-
-consoleLogWriter is an io\.Writer\.
-
-```go
-var _ io.Writer = &consoleLogWriter{}
-```
-
-decompress is a spec TarDecompressorFunc
-
-```go
-var _ spec.TarDecompressorFunc = decompress
-```
-
-cosaSrvDir is where the build directory should be\. When the build API defines a contextDir\, then it will be used\. In most cases this should be /srv\.
-
-```go
-var cosaSrvDir = defaultContextDir
-```
-
-podTimeOut is the length of time to wait for a pod to complete its work\.
-
-```go
-var podTimeOut = 90 * time.Minute
-```
-
-```go
-var (
-	// podmanCaps are the specific permissions we need to run a podman
- // pod. This is a privileged pod.
- podmanCaps = []string{
- "CAP_DAC_READ_SEARCH",
- "CAP_LINUX_IMMUTABLE",
- "CAP_NET_BROADCAST",
- "CAP_NET_ADMIN",
- "CAP_IPC_LOCK",
- "CAP_IPC_OWNER",
- "CAP_SYS_MODULE",
- "CAP_SYS_RAWIO",
- "CAP_SYS_PTRACE",
- "CAP_SYS_PACCT",
- "CAP_SYS_ADMIN",
- "CAP_SYS_BOOT",
- "CAP_SYS_NICE",
- "CAP_SYS_RESOURCE",
- "CAP_SYS_TIME",
- "CAP_SYS_TTY_CONFIG",
- "CAP_LEASE",
- "CAP_AUDIT_CONTROL",
- "CAP_MAC_OVERRIDE",
- "CAP_MAC_ADMIN",
- "CAP_SYSLOG",
- "CAP_WAKE_ALARM",
- "CAP_BLOCK_SUSPEND",
- "CAP_AUDIT_READ",
- }
-)
-```
-
-```go
-var (
- // create the secret mappings for the supported Clouds
- secretMaps = []*secretMap{
-
- {
- label: "aliyun",
- fileVarMap: varMap{
- "config.json": "ALIYUN_CONFIG_FILE",
- },
- },
-
- {
- label: "aws",
- envVarMap: varMap{
- "aws_access_key_id": "AWS_ACCESS_KEY_ID",
- "aws_secret_access_key": "AWS_SECRET_ACCESS_KEY",
- "aws_default_region": "AWS_DEFAULT_REGION",
- "aws_ca_bundle": "AWS_CA_BUNDLE",
- },
- fileVarMap: varMap{
- "config": "AWS_CONFIG_FILE",
- },
- },
-
- {
- label: "aws-cn",
- fileVarMap: varMap{
- "config": "AWS_CN_CONFIG_FILE",
- },
- },
-
- {
- label: "azure",
- fileVarMap: varMap{
- "azure.json": "AZURE_CONFIG",
- "azure.pem": "AZURE_CERT_KEY",
- "azureProfile.json": "AZURE_PROFILE",
- },
- },
-
- {
- label: "gcp",
- fileVarMap: varMap{
- "gce.json": "GCP_IMAGE_UPLOAD_CONFIG",
- "gcp.json": "GCP_IMAGE_UPLOAD_CONFIG",
- },
- },
-
- {
- label: "internal-ca",
- fileVarMap: varMap{
- "ca.crt": "SSL_CERT_FILE",
- },
- },
-
- {
- label: "push-secret",
- fileVarMap: varMap{
- "docker.cfg": "PUSH_AUTH_JSON",
- },
- },
-
- {
- label: "pull-secret",
- fileVarMap: varMap{
- "docker.cfg": "PULL_AUTH_JSON",
- },
- },
-
- {
- label: "koji-keytab",
- fileVarMap: varMap{
- "keytab": "KOJI_KEYTAB",
- },
- envVarMap: varMap{
- "principal": "KOJI_PRINCIPAL",
- },
- },
- }
-)
-```
-
-srvBucket is the name of the bucket to use for remote files being served up
-
-```go
-var srvBucket = "source"
-```
-
-stageDependencyTimeOut is the length of time to wait for a stage's dependencies\.
-
-```go
-var stageDependencyTimeOut = 1 * time.Hour
-```
-
-```go
-var (
- volMaps = map[string]mountReferance{
-
- "internal-ca": {
- volumes: []v1.Volume{
- {
- Name: "pki",
- VolumeSource: v1.VolumeSource{
- Secret: &v1.SecretVolumeSource{
- DefaultMode: ptrInt32(444),
- SecretName: "",
- },
- },
- },
- },
- volumeMounts: []v1.VolumeMount{
- {
- Name: "pki",
- MountPath: "/etc/pki/ca-trust/source/anchors2/",
- },
- },
- },
-
- "docker.json": {
- volumes: []v1.Volume{
- {
- Name: "docker.json",
- VolumeSource: v1.VolumeSource{
- Secret: &v1.SecretVolumeSource{
- DefaultMode: ptrInt32(444),
- SecretName: "",
- },
- },
- },
- },
- volumeMounts: []v1.VolumeMount{
- {
- Name: "docker.json",
- MountPath: filepath.Join(cosaSrvDir, "secrets", "auths"),
- },
- },
- requireData: []string{"docker.json"},
- },
-
- "koji-ca": {
- volumes: []v1.Volume{
- {
- Name: "koji-ca",
- VolumeSource: v1.VolumeSource{
- ConfigMap: &v1.ConfigMapVolumeSource{
- LocalObjectReference: v1.LocalObjectReference{
- Name: "",
- },
- },
- },
- },
- },
- volumeMounts: []v1.VolumeMount{
- {
- Name: "koji-ca",
- MountPath: "/etc/pki/brew",
- },
- },
- },
-
- "koji-config": {
- volumes: []v1.Volume{
- {
- Name: "koji-config",
- VolumeSource: v1.VolumeSource{
- ConfigMap: &v1.ConfigMapVolumeSource{
- LocalObjectReference: v1.LocalObjectReference{
- Name: "",
- },
- },
- },
- },
- },
- volumeMounts: []v1.VolumeMount{
- {
- Name: "koji-config",
- MountPath: "/etc/koji.conf.d",
- },
- },
- },
-
- "krb5.conf": {
- volumes: []v1.Volume{
- {
- Name: "koji-kerberos",
- VolumeSource: v1.VolumeSource{
- ConfigMap: &v1.ConfigMapVolumeSource{
- LocalObjectReference: v1.LocalObjectReference{
- Name: "",
- },
- },
- },
- },
- },
- volumeMounts: []v1.VolumeMount{
- {
- Name: "koji-kerberos",
- MountPath: "/etc/krb5.conf.d",
- },
- },
- },
- }
-)
-```
-
-## func GetClient
-
-```go
-func GetClient(ctx ClusterContext) (*kubernetes.Clientset, string, error)
-```
-
-GetClient fetches the Kubernetes Client from a ClusterContext\.
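-
-A hedged sketch of the call pattern \(the helper is illustrative; the logrus import is assumed from the package's use of log\.Entry\):
-
-```go
-// logClusterNamespace is a hypothetical helper: GetClient returns the
-// Clientset together with the namespace the context is bound to.
-func logClusterNamespace(ctx ClusterContext) error {
-	cs, namespace, err := GetClient(ctx)
-	if err != nil {
-		return err
-	}
-	log.WithField("namespace", namespace).Info("got Kubernetes client")
-	_ = cs // the Clientset is what callers use for API requests
-	return nil
-}
-```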
-
-## func checkPort
-
-```go
-func checkPort(port int) error
-```
-
-checkPort checks if a port is open
-
-## func clusterRunner
-
-```go
-func clusterRunner(term termChan, cp CosaPodder, envVars []v1.EnvVar) error
-```
-
-clusterRunner creates an OpenShift/Kubernetes pod for the work to be done\. The output of the pod is streamed and captured on the console\.
-
-## func copyFile
-
-```go
-func copyFile(src, dest string) error
-```
-
-## func cosaInit
-
-```go
-func cosaInit() error
-```
-
-cosaInit does the initial COSA setup\. To support both pod and buildConfig based builds\, first check the API client\, then check envVars\. The use of envVars in this case is \*safe\*; \`SOURCE\_\{URI\,REF\} == apiBuild\.Spec\.Source\.Git\.\{URI\,REF\}\`\. That is\, SOURCE\_\* envVars will always match the apiBuild\.Spec\.Source\.Git\.\* values\.
-
-## func decompress
-
-```go
-func decompress(in io.ReadCloser, dir string) error
-```
-
-decompress takes an io\.ReadCloser and extracts its contents to the directory\.
-
-## func getHostname
-
-```go
-func getHostname() string
-```
-
-getHostname gets the current hostname
-
-## func getLocalFileStamp
-
-```go
-func getLocalFileStamp(path string) (int64, error)
-```
-
-getLocalFileStamp returns the local file mod time in UTC Unix epoch nanoseconds\.
-
-## func getNetIP
-
-```go
-func getNetIP() (string, error)
-```
-
-getNetIP gets the IPV4 address of a pod when the pod's service account lacks permissions to obtain its own IP address\.
-
-## func getPodIP
-
-```go
-func getPodIP(cs *kubernetes.Clientset, podNamespace, podName string) (string, error)
-```
-
-getPodIP returns the IP of a pod\. getPodIP blocks until the podIP is received\.
-
-## func getPortOrNext
-
-```go
-func getPortOrNext(port int) int
-```
-
-getPortOrNext returns the port if it is available\, or iterates to find the next available port\.
-
-## func getPushTagless
-
-```go
-func getPushTagless(s string) (string, string)
-```
-
-getPushTagless returns the registry and the push path\, e\.g\. registry\.svc:5000/image/bar:tag returns "registry\.svc:5000" and "image/bar"\.
-
-## func init
-
-```go
-func init()
-```
-
-## func isKnownBuildMeta
-
-```go
-func isKnownBuildMeta(n string) bool
-```
-
-isKnownBuildMeta checks if n is known and should be fetched and returned by pods via Minio\.
-
-## func k8sInClusterClient
-
-```go
-func k8sInClusterClient() (*kubernetes.Clientset, string, error)
-```
-
-k8sInClusterClient opens an in\-cluster Kubernetes API client\. The running pod must have a service account defined in the PodSpec\.
-
-## func kubernetesSecretsSetup
-
-```go
-func kubernetesSecretsSetup(ac *kubernetes.Clientset, ns, toDir string) ([]string, error)
-```
-
-kubernetesSecretsSetup looks for secrets in the environment matching 'coreos\-assembler\.coreos\.com/secret=k' and then maps the secret in automatically\. "k" must be a "known" secret type to be mapped automatically\.
-
-## func mustHaveImage
-
-```go
-func mustHaveImage(ctx context.Context, image string) error
-```
-
-mustHaveImage pulls the image if it is not found
-
-## func newFileStamp
-
-```go
-func newFileStamp() string
-```
-
-newFileStamp returns the Unix nanoseconds of the file as a string\. We use Unix nanoseconds for precision\.
-
-## func ocpBuildClient
-
-```go
-func ocpBuildClient() error
-```
-
-ocpBuildClient initializes the OpenShift Build Client API\.
-
-## func podmanRunner
-
-```go
-func podmanRunner(term termChan, cp CosaPodder, envVars []v1.EnvVar) error
-```
-
-podmanRunner runs the work in a Podman container using workDir as \`/srv\`\. \`podman kube play\` does not work well due to permission mappings; there is no way to do ID mappings\.
-
-## func ptrBool
-
-```go
-func ptrBool(b bool) *bool
-```
-
-## func ptrInt
-
-```go
-func ptrInt(i int64) *int64
-```
-
-## func ptrInt32
-
-```go
-func ptrInt32(i int32) *int32
-```
-
-ptrInt32 converts an int32 to a pointer to that int32\.
-
-## func pushOstreeToRegistry
-
-```go
-func pushOstreeToRegistry(ctx ClusterContext, push *spec.Registry, build *cosa.Build) error
-```
-
-pushOstreeToRegistry pushes the OSTree to the defined registry location\.
-
-## func randomString
-
-```go
-func randomString(n int) (string, error)
-```
-
-## func recieveInputBinary
-
-```go
-func recieveInputBinary() (string, error)
-```
-
-recieveInputBinary processes the provided input stream as directed by BinaryBuildSource into dir\. OpenShift sends binary builds over stdin\. To make our life easier\, use the OpenShift API to process the input\. Returns the name of the file written\.
-
-## func sshForwarder
-
-```go
-func sshForwarder(ctx context.Context, cfg *SSHForwardPort) (chan<- bool, error)
-```
-
-sshForwarder is a generic forwarder from the local host to a remote host
-
-## func streamPodLogs
-
-```go
-func streamPodLogs(client *kubernetes.Clientset, namespace string, pod *v1.Pod, term termChan) error
-```
-
-streamPodLogs streams the pod's logs to logging and to disk\. Worker pods are responsible for their work\, but not for their logs\. To make streamPodLogs thread safe and non\-blocking\, it expects a pointer to a bool\. If that pointer is nil or true\, then we return\.
-
-## func tokenRegistryLogin
-
-```go
-func tokenRegistryLogin(ctx ClusterContext, tlsVerify *bool, registry string) error
-```
-
-tokenRegistryLogin logs in to a registry using a service account\.
-
-## func uploadCustomBuildContainer
-
-```go
-func uploadCustomBuildContainer(ctx ClusterContext, tlsVerify *bool, apiBuild *buildapiv1.Build, build *cosa.Build) error
-```
-
-uploadCustomBuildContainer implements the custom build strategy optional step to report the results to the registry as an OCI image\. uploadCustomBuildContainer must be called from a worker pod\. The token used is associated with the service account of the worker pod\.
-
-## func uploadPathAsTarBall
-
-```go
-func uploadPathAsTarBall(ctx context.Context, bucket, object, path, workDir string, sudo bool, r *Return) error
-```
-
-uploadPathAsTarBall uploads a path as a tarball to the minio server\. This uses a shell call\-out since we need to elevate permissions via sudo \(a bug in Golang \<1\.16 prevents elevating privs\)\. Gangplank normally runs as the builder user\, and since some files are written by root\, it would otherwise get permission denied\.
-
-The tarball creation will be done relative to workDir\. If workDir is an empty string\, it will default to the current working directory\.
-
-## func writeDockerSecret
-
-```go
-func writeDockerSecret(ctx ClusterContext, clusterSecretName, authPath string) error
-```
-
-writeDockerSecret writes the \.dockerCfg or \.dockerconfig to the correct path\. It accepts the cluster context\, the name of the secret and the location to write to\.
-
-## func writeToWriters
-
-```go
-func writeToWriters(l *log.Entry, in io.ReadCloser, outs ...io.Writer) <-chan error
-```
-
-writeToWriters writes in to outs until in or outs are closed\. When run as a go\-routine\, callers can terminate it by closing "in"\.
-
-## type Builder
-
-Builder is the interface for executing a build\.
-
-```go
-type Builder interface {
- Exec(ctx ClusterContext) error
-}
-```
-
-buildConfig is a builder\.
-
-```go
-var _ Builder = &buildConfig{}
-```
-
-workSpec is a Builder\.
-
-```go
-var _ Builder = &workSpec{}
-```
-
-### func NewBuilder
-
-```go
-func NewBuilder(ctx ClusterContext) (Builder, error)
-```
-
-NewBuilder returns a Builder\. NewBuilder determines what "Builder" to return by first trying Worker and then an OpenShift builder\.
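-
-Together with Exec this forms the top\-level dispatch\. A hedged sketch of the intended flow \(the wrapper function and the fmt import are illustrative assumptions\):
-
-```go
-// runBuild is a hypothetical wrapper: NewBuilder selects the
-// implementation (workSpec worker or OpenShift buildConfig) and
-// Exec drives it to completion.
-func runBuild(ctx ClusterContext) error {
-	builder, err := NewBuilder(ctx)
-	if err != nil {
-		return fmt.Errorf("no usable builder: %w", err)
-	}
-	return builder.Exec(ctx)
-}
-```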
-
-## type Cluster
-
-Cluster describes a Kubernetes Cluster\.
-
-```go
-type Cluster struct {
- cs *kubernetes.Clientset
- nameSpace string
- kubeConfig string
-
- // inCluster indicates the client should use the Kubernetes in-cluster client
- inCluster bool
-
-	// remoteCluster indicates Gangplank should run the supervising Gangplank in a pod
- remoteCluster bool
-
- // podman indicates that the container should be built using Podman
- podman bool
-
- // podmanSrvDir is the scratch workdir for podman and is bind-mounted
- // in as /srv.
- podmanSrvDir string
-
- stdIn *os.File
- stdOut *os.File
- stdErr *os.File
-}
-```
-
-### func GetCluster
-
-```go
-func GetCluster(ctx ClusterContext) (*Cluster, error)
-```
-
-GetCluster fetches the Cluster options from the Context
-
-### func \(\*Cluster\) GetStdIO
-
-```go
-func (c *Cluster) GetStdIO() (*os.File, *os.File, *os.File)
-```
-
-GetStdIO returns the stdIO options
-
-### func \(\*Cluster\) SetPodman
-
-```go
-func (c *Cluster) SetPodman(srvDir string)
-```
-
-SetPodman forces out\-of\-cluster execution via Podman\.
-
-### func \(\*Cluster\) SetRemoteCluster
-
-```go
-func (c *Cluster) SetRemoteCluster(kubeConfig, namespace string)
-```
-
-SetRemoteCluster configures the use of a remote cluster\.
-
-### func \(\*Cluster\) SetStdIO
-
-```go
-func (c *Cluster) SetStdIO(stdIn, stdOut, stdErr *os.File)
-```
-
-SetStdIO sets the IO options\. TODO: Implement for \`cosa remote\`\.
-
-### func \(\*Cluster\) toKubernetesCluster
-
-```go
-func (c *Cluster) toKubernetesCluster() *KubernetesCluster
-```
-
-toKubernetesCluster casts the cluster to the interface
-
-## type ClusterContext
-
-ClusterContext is a context\.Context that carries the cluster options\.
-
-```go
-type ClusterContext context.Context
-```
-
-### func NewClusterContext
-
-```go
-func NewClusterContext(ctx context.Context, kc KubernetesCluster) ClusterContext
-```
-
-NewClusterContext returns a context with cluster options\.
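-
-A hedged sketch of wiring a ClusterContext \(the helper and the context import are illustrative assumptions\):
-
-```go
-// newInClusterContext is a hypothetical helper: wrap a plain context
-// with cluster options so GetCluster/GetClient can recover them later.
-func newInClusterContext() (ClusterContext, error) {
-	kc := NewCluster(true) // true requests the in-cluster client
-	ctx := NewClusterContext(context.Background(), kc)
-	if _, err := GetCluster(ctx); err != nil {
-		return nil, err
-	}
-	return ctx, nil
-}
-```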
-
-## type CosaPodder
-
-CosaPodder creates COSA\-capable pods\.
-
-```go
-type CosaPodder interface {
- WorkerRunner(term termChan, envVar []v1.EnvVar) error
- GetClusterCtx() ClusterContext
- getPodSpec([]v1.EnvVar) (*v1.Pod, error)
-}
-```
-
-A cosaPod is a CosaPodder\.
-
-```go
-var _ CosaPodder = &cosaPod{}
-```
-
-hopPod implements the CosaPodder interface\.
-
-```go
-var _ CosaPodder = &hopPod{}
-```
-
-### func NewCosaPodder
-
-```go
-func NewCosaPodder(
- ctx ClusterContext,
- apiBuild *buildapiv1.Build,
- index int) (CosaPodder, error)
-```
-
-NewCosaPodder creates a CosaPodder
-
-### func NewHopPod
-
-```go
-func NewHopPod(ctx ClusterContext, image, serviceAccount, workDir string, js *spec.JobSpec) CosaPodder
-```
-
-NewHopPod returns a CosaPodder\.
-
-## type KubernetesCluster
-
-KubernetesCluster is the Gangplank interface to using a cluster\.
-
-```go
-type KubernetesCluster interface {
- SetStdIO(stdIn, stdOut, stdErr *os.File)
- GetStdIO() (*os.File, *os.File, *os.File)
- SetPodman(srvDir string)
- SetRemoteCluster(kubeConfig string, namespace string)
-}
-```
-
-Cluster implements a KubernetesCluster
-
-```go
-var _ KubernetesCluster = &Cluster{}
-```
-
-### func NewCluster
-
-```go
-func NewCluster(inCluster bool) KubernetesCluster
-```
-
-NewCluster returns a Kubernetes cluster
-
-## type PodBuilder
-
-PodBuilder is the manual/unbounded Build interface\. A PodBuilder uses a build\.openshift\.io/v1 Build object so that manual and BuildConfig\-driven builds share the exact same code path\.
-
-```go
-type PodBuilder interface {
- Exec(ctx ClusterContext) error
-}
-```
-
-cli is a Builder \(and a poor one at that too\.\.\.\)\. While a ClusterPodBuilder is a Builder\, we treat it separately\.
-
-```go
-var _ PodBuilder = &podBuild{}
-```
-
-### func NewPodBuilder
-
-```go
-func NewPodBuilder(ctx ClusterContext, image, serviceAccount, workDir string, js *spec.JobSpec) (PodBuilder, error)
-```
-
-NewPodBuilder returns a ClusterPodBuilder ready for execution\.
-
-## type RemoteFile
-
-RemoteFile is an object to fetch from a remote server
-
-```go
-type RemoteFile struct {
- Bucket string `json:"bucket,omitempty"`
- Object string `json:"object,omitempty"`
- Minio *minioServer `json:"remote,omitempty"`
- Compressed bool `json:"comptempty"`
- Artifact *cosa.Artifact `json:"artifact,omitempty"`
-
- // ForcePath forces writing to, or uncompressing to a specific path
- ForcePath string `json:"force_path,omitempty"`
-
-	// ForceExtractPath forces extracting to a specific path
- ForceExtractPath string `json:"force_extract_path,omitempty"`
-}
-```
-
-### func getBuildMeta
-
-```go
-func getBuildMeta(jsonPath, keyPathBase string, m *minioServer, l *log.Entry) []*RemoteFile
-```
-
-getBuildMeta searches a path for all build meta files and creates remoteFiles for them\. The keyPathBase is the relative path for the object\.
-
-### func getStageFiles
-
-```go
-func getStageFiles(buildID string,
- l *log.Entry, m *minioServer, lastBuild *cosa.Build, s *spec.Stage) (*cosa.Build, []*RemoteFile, error)
-```
-
-getStageFiles returns the newest build and the RemoteFiles for the stage\. Depending on the stage's dependencies\, it will ensure that all meta\-data and artifacts are sent\. If the stage requires/requests the caches\, they will be included in the RemoteFiles\.
-
-### func \(\*RemoteFile\) Extract
-
-```go
-func (r *RemoteFile) Extract(ctx context.Context, path string) error
-```
-
-Extract decompresses the remote file to the path\.
-
-### func \(\*RemoteFile\) WriteToPath
-
-```go
-func (r *RemoteFile) WriteToPath(ctx context.Context, path string) error
-```
-
-WriteToPath fetches the remote file and writes it locally\.
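-
-A hedged in\-package sketch tying the fields and both methods together \(the helper is illustrative; the bucket and object names reuse the cache constants documented above\, and the path/filepath import is assumed\):
-
-```go
-// fetchCache is a hypothetical helper: compressed objects are
-// extracted in place, plain objects are written out as-is.
-func fetchCache(ctx context.Context, m *minioServer) error {
-	rf := &RemoteFile{
-		Bucket:     cacheBucket,      // "cache"
-		Object:     cacheTarballName, // "cache.tar.gz"
-		Minio:      m,
-		Compressed: true,
-	}
-	if rf.Compressed {
-		return rf.Extract(ctx, cosaSrvDir)
-	}
-	return rf.WriteToPath(ctx, filepath.Join(cosaSrvDir, cacheTarballName))
-}
-```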
-
-## type Return
-
-Return describes the location of where to send results\.
-
-```go
-type Return struct {
- Minio *minioServer `json:"remote"`
- Bucket string `json:"bucket"`
- Overwrite bool `json:"overwrite"`
-
-	// ArtifactTypes will return only artifacts that are known and defined
- // For example []string{"aws","azure"}
- ArtifactTypes []string `json:"artifacts"`
-
- // Return all files found in the builds directory
- All bool `json:"all"`
-}
-```
-
-### func \(\*Return\) Run
-
-```go
-func (r *Return) Run(ctx context.Context, ws *workSpec) error
-```
-
-Run executes the report by walking the build path\.
-
-## type Returner
-
-Returner sends the results to the ReportServer
-
-```go
-type Returner interface {
- Run(ctx context.Context, ws *workSpec) error
-}
-```
-
-Return is a Returner
-
-```go
-var _ Returner = &Return{}
-```
-
-## type SSHForwardPort
-
-```go
-type SSHForwardPort struct {
- Host string
- User string
- Key string
-
- // port is not exported
- port int
-}
-```
-
-### func getSshMinioForwarder
-
-```go
-func getSshMinioForwarder(j *spec.JobSpec) *SSHForwardPort
-```
-
-getSshMinioForwarder returns an SSHForwardPort from the jobspec definition for forwarding a minio server\, or nil if forwarding is not enabled\.
-
-## type SecretMapper
-
-SecretMapper maps a secretMap
-
-```go
-type SecretMapper interface {
- Setup() error
-}
-```
-
-## type buildConfig
-
-buildConfig represents the input into a buildConfig\.
-
-```go
-type buildConfig struct {
- JobSpecURL string `envVar:"COSA_JOBSPEC_URL"`
- JobSpecRef string `envVar:"COSA_JOBSPEC_REF"`
- JobSpecFile string `envVar:"COSA_JOBSPEC_FILE"`
- CosaCmds string `envVar:"COSA_CMDS"`
-
- // Information about the parent pod
- PodName string `envVar:"COSA_POD_NAME"`
- PodIP string `envVar:"COSA_POD_IP"`
- PodNameSpace string `envVar:"COSA_POD_NAMESPACE"`
-
- // HostIP is the kubernetes IP address of the running pod.
- HostIP string
- HostPod string
-
- // Internal copy of the JobSpec
- JobSpec spec.JobSpec
-
- ClusterCtx ClusterContext
-}
-```
-
-### func newBC
-
-```go
-func newBC(ctx context.Context, c *Cluster) (*buildConfig, error)
-```
-
-newBC accepts a context and returns a buildConfig
-
-### func \(\*buildConfig\) Exec
-
-```go
-func (bc *buildConfig) Exec(ctx ClusterContext) (err error)
-```
-
-Exec executes the command using the closure for the commands
-
-### func \(\*buildConfig\) discoverStages
-
-```go
-func (bc *buildConfig) discoverStages(m *minioServer) ([]*RemoteFile, error)
-```
-
-discoverStages supports the envVar and \*\.cosa\.sh scripts as implied stages\. The envVar stage will be run first\, followed by the \`\*\.cosa\.sh\` scripts\.
-
-### func \(\*buildConfig\) ocpBinaryInput
-
-```go
-func (bc *buildConfig) ocpBinaryInput(m *minioServer) ([]*RemoteFile, error)
-```
-
-ocpBinaryInput decompresses the binary input\. If the binary input is a tarball with an embedded JobSpec\, it is extracted\, read\, and used\.
-
-## type byteFields
-
-byteFields represents a configMap's data fields\.
-
-```go
-type byteFields map[string][]byte
-```
-
-## type clusterCtxKey
-
-```go
-type clusterCtxKey int
-```
-
-```go
-const clusterObj clusterCtxKey = 0
-```
-
-## type consoleLogWriter
-
-consoleLogWriter is an io\.Writer that emits fancy logs to a screen\.
-
-```go
-type consoleLogWriter struct {
- startTime time.Time
- prefix string
-}
-```
-
-### func newConsoleLogWriter
-
-```go
-func newConsoleLogWriter(prefix string) *consoleLogWriter
-```
-
-newConsoleLogWriter is a helper function for getting a new writer\.
-
-### func \(\*consoleLogWriter\) Write
-
-```go
-func (cw *consoleLogWriter) Write(b []byte) (int, error)
-```
-
-Write implements io\.Writer for the console log writer\.
-
-## type cosaPod
-
-cosaPod is a COSA pod
-
-```go
-type cosaPod struct {
- apiBuild *buildapiv1.Build
- clusterCtx ClusterContext
-
- ocpInitCommand []string
- ocpRequirements v1.ResourceList
- ocpSecContext *v1.SecurityContext
- volumes []v1.Volume
- volumeMounts []v1.VolumeMount
-
- index int
-}
-```
-
-### func \(\*cosaPod\) GetClusterCtx
-
-```go
-func (cp *cosaPod) GetClusterCtx() ClusterContext
-```
-
-### func \(\*cosaPod\) WorkerRunner
-
-```go
-func (cp *cosaPod) WorkerRunner(term termChan, envVars []v1.EnvVar) error
-```
-
-WorkerRunner runs a worker pod on either OpenShift/Kubernetes or as a podman container\.
-
-### func \(\*cosaPod\) addVolumeFromObjectLabel
-
-```go
-func (cp *cosaPod) addVolumeFromObjectLabel(obj metav1.Object, fields stringFields) error
-```
-
-addVolumeFromObjectLabel is a helper that receives an object and data and looks up the object's name from volMaps\. If a mapping is found\, then the object is added to cosaPod's definition\.
-
-### func \(\*cosaPod\) addVolumesFromConfigMapLabels
-
-```go
-func (cp *cosaPod) addVolumesFromConfigMapLabels() error
-```
-
-addVolumesFromConfigMapLabels discovers configMaps with matching labels and if known\, adds the defined volume mount from volMaps\.
-
-### func \(\*cosaPod\) addVolumesFromSecretLabels
-
-```go
-func (cp *cosaPod) addVolumesFromSecretLabels() error
-```
-
-addVolumesFromSecretLabels discovers secrets with matching labels and if known\, adds the defined volume mount from volMaps\.
-
-### func \(\*cosaPod\) getPodSpec
-
-```go
-func (cp *cosaPod) getPodSpec(envVars []v1.EnvVar) (*v1.Pod, error)
-```
-
-getPodSpec returns a pod specification\.
-
-## type hopPod
-
-hopPod describes a pod for running Gangplank remotely in a cluster\.
-
-```go
-type hopPod struct {
- clusterCtx ClusterContext
- js *spec.JobSpec
-
- image string
- ns string
- serviceAccount string
-}
-```
-
-### func \(\*hopPod\) GetClusterCtx
-
-```go
-func (h *hopPod) GetClusterCtx() ClusterContext
-```
-
-GetClusterCtx returns the cluster context of a hopPod
-
-### func \(\*hopPod\) WorkerRunner
-
-```go
-func (h *hopPod) WorkerRunner(term termChan, _ []v1.EnvVar) error
-```
-
-WorkerRunner runs Gangplank in a cluster through a remote/hop pod\.
-
-### func \(\*hopPod\) getPodSpec
-
-```go
-func (h *hopPod) getPodSpec([]v1.EnvVar) (*v1.Pod, error)
-```
-
-getPodSpec creates a very generic pod that can run on any Cluster\. The pod will mimic a build api pod\.
-
-## type minioServer
-
-minioServer describes a Minio S3 object storage server to start\.
-
-```go
-type minioServer struct {
- AccessKey string `json:"accesskey"`
- SecretKey string `json:"secretkey"`
- Host string `json:"host"`
- Port int `json:"port"`
- ExternalServer bool `json:"external_server"` //indicates that a server should not be started
- Region string `json:"region"`
-
- // overSSH describes how to forward the Minio Port over SSH
- // This option is only useful with envVar CONTAINER_HOST running
- // in podman mode.
- overSSH *SSHForwardPort
- // sshStopCh is used to shutdown the SSH port forwarding.
- sshStopCh chan<- bool
-
- dir string
- minioOptions minio.Options
- cmd *exec.Cmd
-}
-```
-
-### func StartStandaloneMinioServer
-
-```go
-func StartStandaloneMinioServer(ctx context.Context, srvDir, cfgFile string, overSSH *SSHForwardPort) (*minioServer, error)
-```
-
-StartStandaloneMinioServer starts a standalone minio server\.
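-
-A hedged sketch of the lifecycle \(the directory\, the config path\, and the helper name are illustrative assumptions\):
-
-```go
-// serveEphemeralMinio is a hypothetical helper showing the lifecycle:
-// start the server, persist its generated keys for workers, reap it.
-func serveEphemeralMinio(ctx context.Context) error {
-	m, err := StartStandaloneMinioServer(ctx, "/srv", "", nil)
-	if err != nil {
-		return err
-	}
-	defer m.Kill()
-	// Workers read this file to connect to the same instance.
-	return m.WriteToFile("/srv/minio.cfg")
-}
-```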
-
-### func minioCfgFromFile
-
-```go
-func minioCfgFromFile(f string) (mk minioServer, err error)
-```
-
-minioCfgFromFile returns a minio configuration from a file
-
-### func minioCfgReader
-
-```go
-func minioCfgReader(in io.Reader) (m minioServer, err error)
-```
-
-minioCfgReader takes an io\.Reader and returns a minio cfg\.
-
-### func newMinioServer
-
-```go
-func newMinioServer(cfgFile string) *minioServer
-```
-
-newMinioServer defines an ephemeral minioServer from a config or creates a new one\. To prevent random pods/people from accessing or relying on the server\, we use entirely random keys\.
-
-### func \(\*minioServer\) Exists
-
-```go
-func (m *minioServer) Exists(bucket, object string) bool
-```
-
-Exists checks if the bucket/object exists\.
-
-### func \(\*minioServer\) Kill
-
-```go
-func (m *minioServer) Kill()
-```
-
-Kill terminates the minio server\.
-
-### func \(\*minioServer\) Wait
-
-```go
-func (m *minioServer) Wait()
-```
-
-Wait blocks until Minio is finished\.
-
-### func \(\*minioServer\) WriteJSON
-
-```go
-func (m *minioServer) WriteJSON(w io.Writer) error
-```
-
-WriteJSON writes the minio configuration as JSON\.
-
-### func \(\*minioServer\) WriteToFile
-
-```go
-func (m *minioServer) WriteToFile(f string) error
-```
-
-WriteToFile writes the minio keys to a file\.
-
-### func \(\*minioServer\) client
-
-```go
-func (m *minioServer) client() (*minio.Client, error)
-```
-
-client returns a Minio Client\.
-
-### func \(\*minioServer\) ensureBucketExists
-
-```go
-func (m *minioServer) ensureBucketExists(ctx context.Context, bucket string) error
-```
-
-### func \(\*minioServer\) exec
-
-```go
-func (m *minioServer) exec(ctx context.Context) error
-```
-
-exec runs the minio command
-
-### func \(\*minioServer\) fetcher
-
-```go
-func (m *minioServer) fetcher(ctx context.Context, bucket, object string, dest io.Writer) error
-```
-
-fetcher retrieves an object from a Minio server
-
-### func \(\*minioServer\) getStamp
-
-```go
-func (m *minioServer) getStamp(bucket, object string) (int64, error)
-```
-
-getStamp returns the stamp\. If the file does not exist remotely\, a stamp of zero is returned\. If the file exists but has not been stamped\, then the UTC Unix epoch in nanoseconds of the modification time is used \(the stamps are lost when the minio instance is reaped\)\. The obvious flaw is that this requires all hosts to have coordinated time; this should be the case for a Kubernetes cluster\, and podman\-based builds will always use the same time source\.
-
-### func \(\*minioServer\) isLocalNewer
-
-```go
-func (m *minioServer) isLocalNewer(bucket, object string, path string) (bool, error)
-```
-
-isLocalNewer checks if the file is newer than the remote file\, if any\. If the file does not exist remotely\, then it is considered newer\.
-
-### func \(\*minioServer\) putter
-
-```go
-func (m *minioServer) putter(ctx context.Context, bucket, object, fpath string) error
-```
-
-putter uploads the file at fpath to the remote minio server\.
-
-### func \(\*minioServer\) stampFile
-
-```go
-func (m *minioServer) stampFile(bucket, object string) error
-```
-
-stampFile adds the unique stamp\.
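-
-isLocalNewer\, putter\, and stampFile combine into a stamp\-based sync\. A hedged in\-package sketch \(the helper name is an illustrative assumption\):
-
-```go
-// pushIfNewer is a hypothetical helper: upload and re-stamp an object
-// only when the local copy is newer than the remote one.
-func pushIfNewer(ctx context.Context, m *minioServer, bucket, object, path string) error {
-	newer, err := m.isLocalNewer(bucket, object, path)
-	if err != nil {
-		return err
-	}
-	if !newer {
-		return nil
-	}
-	if err := m.putter(ctx, bucket, object, path); err != nil {
-		return err
-	}
-	return m.stampFile(bucket, object)
-}
-```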
-
-### func \(\*minioServer\) start
-
-```go
-func (m *minioServer) start(ctx context.Context) error
-```
-
-start executes the minio server and returns an error if not ready\.
-
-## type mountReferance
-
-mountReferance is a mapping of secrets or a configMap\.
-
-```go
-type mountReferance struct {
- volumes []v1.Volume
- volumeMounts []v1.VolumeMount
- requireData []string
- addInitCommands []string
-}
-```
-
-## type outWriteCloser
-
-outWriteCloser is a noop closer
-
-```go
-type outWriteCloser struct {
- *os.File
-}
-```
-
-### func newNoopFileWriterCloser
-
-```go
-func newNoopFileWriterCloser(f *os.File) *outWriteCloser
-```
-
-### func \(\*outWriteCloser\) Close
-
-```go
-func (o *outWriteCloser) Close() error
-```
-
-## type podBuild
-
-```go
-type podBuild struct {
- apibuild *buildapiv1.Build
- bc *buildConfig
- js *spec.JobSpec
-
- clusterCtx ClusterContext
- pod *v1.Pod
-
- hostname string
- image string
- ipaddr string
- projectNamespace string
- serviceAccount string
- workDir string
-}
-```
-
-### func \(\*podBuild\) Exec
-
-```go
-func (pb *podBuild) Exec(ctx ClusterContext) error
-```
-
-Exec starts the unbounded build\.
-
-### func \(\*podBuild\) encodeAPIBuild
-
-```go
-func (pb *podBuild) encodeAPIBuild() (string, error)
-```
-
-encodeAPIBuild encodes the buildapiv1 object to a JSON object\. JSON is the messaging interface for Kubernetes\.
-
-### func \(\*podBuild\) generateAPIBuild
-
-```go
-func (pb *podBuild) generateAPIBuild() error
-```
-
-generateAPIBuild creates a "mock" buildconfig\.openshift\.io/v1 Kubernetes object that is consumed by \`bc\.go\`\.
-
-### func \(\*podBuild\) setInCluster
-
-```go
-func (pb *podBuild) setInCluster() error
-```
-
-setInCluster does the necessary setup for an unbounded builder running as an in\-cluster build\.
-
-## type podmanRunnerFunc
-
-```go
-type podmanRunnerFunc func(termChan, CosaPodder, []v1.EnvVar) error
-```
-
-podmanFunc is set to unimplemented by default\.
-
-```go
-var podmanFunc podmanRunnerFunc = func(termChan, CosaPodder, []v1.EnvVar) error {
-	return errors.New("build was not compiled with podman support")
-}
-```
-
-## type secretMap
-
-```go
-type secretMap struct {
- label string
- envVarMap varMap
- fileVarMap varMap
-}
-```
-
-### func getSecretMapping
-
-```go
-func getSecretMapping(s string) (*secretMap, bool)
-```
-
-getSecretMapping returns the secretMap and true if found\.
-
-### func \(\*secretMap\) writeSecretEnvVars
-
-```go
-func (sm *secretMap) writeSecretEnvVars(d map[string][]byte, ret *[]string) error
-```
-
-writeSecretEnvVars creates envVars\.
-
-### func \(\*secretMap\) writeSecretFiles
-
-```go
-func (sm *secretMap) writeSecretFiles(toDir, name string, d map[string][]byte, ret *[]string) error
-```
-
-writeSecretFiles writes secrets to their location based on the map\.
-
-## type stringFields
-
-stringFields represents a secret's data fields\.
-
-```go
-type stringFields map[string]string
-```
-
-### func toStringFields
-
-```go
-func toStringFields(bf byteFields) stringFields
-```
-
-toStringFields is used to convert from a byteFields to a stringFields
-
-## type termChan
-
-termChan is a channel used to signal a termination\.
-
-```go
-type termChan <-chan bool
-```
-
-## type varMap
-
-```go
-type varMap map[string]string
-```
-
-## type workSpec
-
-workSpec defines the job for a remote worker to do\. A workSpec is dispatched by a builder and is tightly coupled to the dispatching pod\.
-
-```go
-type workSpec struct {
- RemoteFiles []*RemoteFile `json:"remotefiles"`
- JobSpec spec.JobSpec `json:"jobspec"`
- ExecuteStages []string `json:"executeStages"`
- APIBuild *buildapiv1.Build `json:"apiBuild"`
- Return *Return `json:"return"`
-}
-```
-
-### func newWorkSpec
-
-```go
-func newWorkSpec(ctx ClusterContext) (*workSpec, error)
-```
-
-newWorkSpec returns a workspec from the environment
-
-### func \(\*workSpec\) Exec
-
-```go
-func (ws *workSpec) Exec(ctx ClusterContext) error
-```
-
-Exec executes the work spec tasks\.
-
-### func \(\*workSpec\) Marshal
-
-```go
-func (ws *workSpec) Marshal() ([]byte, error)
-```
-
-Marshal returns the JSON of a WorkSpec\.
-
-### func \(\*workSpec\) Unmarshal
-
-```go
-func (ws *workSpec) Unmarshal(r io.Reader) error
-```
-
-Unmarshal decodes an io\.Reader to a workSpec\.
-
-### func \(\*workSpec\) getEnvVars
-
-```go
-func (ws *workSpec) getEnvVars() ([]v1.EnvVar, error)
-```
-
-getEnvVars returns the envVars to be exposed to the worker pod\. When \`newWorkSpec\` is called\, the embedded JSON string in the envVar is read and the worker gets its configuration\.
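-
-Marshal and Unmarshal are symmetric over the CosaWorkPodEnvVarName envVar\. A hedged sketch of the round trip \(the helper and the bytes import are illustrative assumptions\):
-
-```go
-// roundTripWorkSpec is a hypothetical helper: the dispatcher serializes
-// the workSpec into CosaWorkPodEnvVarName and the worker decodes it.
-func roundTripWorkSpec(ws *workSpec) (*workSpec, error) {
-	raw, err := ws.Marshal()
-	if err != nil {
-		return nil, err
-	}
-	got := &workSpec{}
-	if err := got.Unmarshal(bytes.NewReader(raw)); err != nil {
-		return nil, err
-	}
-	return got, nil
-}
-```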
-
-
-
-Generated by [gomarkdoc]()
diff --git a/docs/gangplank/api-spec.md b/docs/gangplank/api-spec.md
deleted file mode 100755
index 89c67ddb0a1d127d9f39fe5ebd4556987e3772b4..0000000000000000000000000000000000000000
--- a/docs/gangplank/api-spec.md
+++ /dev/null
@@ -1,882 +0,0 @@
-
-
-# spec
-
-```go
-import "github.com/coreos/gangplank/spec"
-```
-
-## Index
-
-- [Constants](<#constants>)
-- [Variables](<#variables>)
-- [func AddKolaTestFlags(targetVar *[]string, fs *pflag.FlagSet)](<#func-addkolatestflags>)
-- [func GetArtifactShortHandNames() []string](<#func-getartifactshorthandnames>)
-- [func addShorthandToStage(artifact string, stage *Stage)](<#func-addshorthandtostage>)
-- [func cosaBuildCmd(b string, js *JobSpec) ([]string, error)](<#func-cosabuildcmd>)
-- [func init()](<#func-init>)
-- [func isBaseArtifact(artifact string) bool](<#func-isbaseartifact>)
-- [func isValidArtifactShortHand(a string) bool](<#func-isvalidartifactshorthand>)
-- [func strPtr(s string) *string](<#func-strptr>)
-- [type Aliyun](<#type-aliyun>)
- - [func (a *Aliyun) GetPublishCommand(buildID string) (string, error)](<#func-aliyun-getpublishcommand>)
-- [type Archives](<#type-archives>)
-- [type Artifacts](<#type-artifacts>)
-- [type Aws](<#type-aws>)
- - [func (a *Aws) GetPublishCommand(buildID string) (string, error)](<#func-aws-getpublishcommand>)
-- [type Azure](<#type-azure>)
- - [func (a *Azure) GetPublishCommand(buildID string) (string, error)](<#func-azure-getpublishcommand>)
-- [type Brew](<#type-brew>)
-- [type Cloud](<#type-cloud>)
-- [type CloudsCfgs](<#type-cloudscfgs>)
- - [func (c *CloudsCfgs) GetCloudCfg(cloud string) (Cloud, error)](<#func-cloudscfgs-getcloudcfg>)
-- [type Gcp](<#type-gcp>)
- - [func (g *Gcp) GetPublishCommand(buildID string) (string, error)](<#func-gcp-getpublishcommand>)
-- [type Job](<#type-job>)
-- [type JobSpec](<#type-jobspec>)
- - [func JobSpecFromFile(f string) (j JobSpec, err error)](<#func-jobspecfromfile>)
- - [func JobSpecFromRepo(url, ref, specFile string) (JobSpec, error)](<#func-jobspecfromrepo>)
- - [func JobSpecReader(in io.Reader) (j JobSpec, err error)](<#func-jobspecreader>)
- - [func (js *JobSpec) AddCliFlags(cmd *pflag.FlagSet)](<#func-jobspec-addcliflags>)
- - [func (js *JobSpec) AddRepos()](<#func-jobspec-addrepos>)
- - [func (j *JobSpec) GenerateStages(fromNames, testNames []string, singleStage bool) error](<#func-jobspec-generatestages>)
- - [func (j *JobSpec) GetStage(id string) (*Stage, error)](<#func-jobspec-getstage>)
- - [func (js *JobSpec) WriteJSON(w io.Writer) error](<#func-jobspec-writejson>)
- - [func (js *JobSpec) WriteYAML(w io.Writer) error](<#func-jobspec-writeyaml>)
-- [type Override](<#type-override>)
- - [func (o *Override) Fetch(l *log.Entry, path string, wf TarDecompressorFunc) error](<#func-override-fetch>)
- - [func (o *Override) writePath(basePath string) (string, error)](<#func-override-writepath>)
-- [type PublishOscontainer](<#type-publishoscontainer>)
-- [type PushSecretType](<#type-pushsecrettype>)
-- [type Recipe](<#type-recipe>)
-- [type Registry](<#type-registry>)
-- [type RenderData](<#type-renderdata>)
- - [func (rd *RenderData) ExecuteTemplateFromString(s ...string) ([]string, error)](<#func-renderdata-executetemplatefromstring>)
- - [func (rd *RenderData) ExecuteTemplateToWriter(in io.Reader, out io.Writer) error](<#func-renderdata-executetemplatetowriter>)
- - [func (rd *RenderData) RendererExecuter(ctx context.Context, env []string, scripts ...string) error](<#func-renderdata-rendererexecuter>)
- - [func (rd *RenderData) executeTemplate(r io.Reader) ([]byte, error)](<#func-renderdata-executetemplate>)
-- [type Repo](<#type-repo>)
- - [func (r *Repo) Writer(path string) (string, error)](<#func-repo-writer>)
-- [type S3](<#type-s3>)
-- [type Spec](<#type-spec>)
-- [type Stage](<#type-stage>)
- - [func (s *Stage) AddCommands(args []string)](<#func-stage-addcommands>)
- - [func (s *Stage) AddRequires(args []string)](<#func-stage-addrequires>)
- - [func (s *Stage) DeepCopy() (Stage, error)](<#func-stage-deepcopy>)
- - [func (s *Stage) Execute(ctx context.Context, rd *RenderData, envVars []string) error](<#func-stage-execute>)
- - [func (s *Stage) getCommands(rd *RenderData) ([]string, error)](<#func-stage-getcommands>)
- - [func (s *Stage) getPostCommands(rd *RenderData) ([]string, error)](<#func-stage-getpostcommands>)
- - [func (s *Stage) getPublishCommands(rd *RenderData) ([]string, error)](<#func-stage-getpublishcommands>)
-- [type TarDecompressorFunc](<#type-tardecompressorfunc>)
-- [type kolaTests](<#type-kolatests>)
-
-
-## Constants
-
-```go
-const (
- fedoraGitURL = "https://github.com/coreos/fedora-coreos-config"
- fedoraGitRef = "testing-devel"
-
- rhcosGitURL = "https://github.com/openshift/os"
- rhcosGitRef = "main"
-)
-```
-
-Supported push secret types\.
-
-```go
-const (
- // PushSecretTypeInline means that the secret string is a string literal
- // of the docker auth.json.
- PushSecretTypeInline = "inline"
- // PushSecretTypeCluster indicates that the named secret in PushRegistry should be
- // fetched via the service account from the cluster.
- PushSecretTypeCluster = "cluster"
- // PushSecretTypeToken indicates that the service account associated with the token
- // has access to the push repository.
- PushSecretTypeToken = "token"
-)
-```
-
-```go
-const (
- TarballTypeAll = "all"
- TarballTypeRpms = "rpms"
- TarballTypeRpm = "rpm"
- TarballTypeRootfs = "rootfs"
- overrideBasePath = "overrides"
-)
-```
-
-These are the only hard\-coded commands that Gangplank understands\.
-
-```go
-const (
- // defaultBaseCommand is the basic build command
- defaultBaseCommand = "cosa fetch; cosa build;"
- // defaultBaseDelayMergeCommand is used for distributed build using
- // parallel workers pods.
- defaultBaseDelayMergeCommand = "cosa fetch; cosa build --delay-meta-merge;"
-
- // defaultFinalizeCommand ensures that the meta.json is merged.
- defaultFinalizeCommand = "cosa meta --finalize;"
-)
-```
-
-DefaultJobSpecFile is the default JobSpecFile name\.
-
-```go
-const DefaultJobSpecFile = "jobspec.yaml"
-```
-
-## Variables
-
-Default to building Fedora
-
-```go
-var (
- gitRef = fedoraGitRef
- gitURL = fedoraGitURL
-
- // repos is a list of URLs that are added to the Repos.
- repos []string
-)
-```
-
-```go
-var (
- // pseudoStages are special setup and tear down phases.
- pseudoStages = []string{"base", "finalize"}
- // buildableArtifacts are known artifacts types from the schema.
- buildableArtifacts = append(pseudoStages, cosa.GetCommandBuildableArtifacts()...)
-
- // baseArtifacts are built by default by the "base" short-hand
- baseArtifacts = []string{"ostree", "qemu"}
-)
-```
-
-kolaTestDefinitions contains a map of the kola tests\.
-
-```go
-var kolaTestDefinitions = kolaTests{
- "basicBios": {
- ID: "Kola Basic BIOS Test",
- PostCommands: []string{"cosa kola run --qemu-nvme=true basic"},
- RequireArtifacts: []string{"qemu"},
- ExecutionOrder: 2,
- },
- "basicQemu": {
- ID: "Kola Basic Qemu",
- PostCommands: []string{"cosa kola --basic-qemu-scenarios"},
- RequireArtifacts: []string{"qemu"},
- ExecutionOrder: 2,
- },
- "basicUEFI": {
- ID: "Basic UEFI Test",
- PostCommands: []string{"cosa kola run --qemu-firmware=uefi basic"},
- RequireArtifacts: []string{"qemu"},
- ExecutionOrder: 2,
- },
- "external": {
- ID: "External Kola Test",
- PostCommands: []string{"cosa kola run 'ext.*'"},
- RequireArtifacts: []string{"qemu"},
- ExecutionOrder: 2,
- },
- "long": {
- ID: "Kola Long Tests",
- PostCommands: []string{"cosa kola run --parallel 3"},
- ExecutionOrder: 2,
- },
- "upgrade": {
- ID: "Kola Upgrade Test",
- PostCommands: []string{"kola run-upgrade --ignition-version v2 --output-dir tmp/kola-upgrade"},
- ExecutionOrder: 2,
- },
-
- "iso": {
- ID: "Kola ISO Testing",
- PostCommands: []string{"kola testiso -S"},
- ExecutionOrder: 4,
- RequireArtifacts: []string{"live-iso"},
- },
- "metal4k": {
- ID: "Kola ISO Testing 4K Disks",
- PostCommands: []string{"kola testiso -S --qemu-native-4k --scenarios iso-install --output-dir tmp/kola-metal4k"},
- ExecutionOrder: 4,
- RequireArtifacts: []string{"live-iso"},
- },
-}
-```
-
-## func AddKolaTestFlags
-
-```go
-func AddKolaTestFlags(targetVar *[]string, fs *pflag.FlagSet)
-```
-
-AddKolaTestFlags adds a StringVar flag for populating the supported tests into a string slice\.
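-
-As a minimal sketch of how this is wired into a CLI (the `spec` import path is an assumption for illustration, not part of these docs):
-
-```go
-package main
-
-import (
-    "fmt"
-    "os"
-
-    "github.com/spf13/pflag"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    var tests []string
-    fs := pflag.NewFlagSet("example", pflag.ExitOnError)
-    // Register the test-selection flag; parsing fills `tests`.
-    spec.AddKolaTestFlags(&tests, fs)
-    _ = fs.Parse(os.Args[1:])
-    fmt.Println("selected kola tests:", tests)
-}
-```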
-
-## func GetArtifactShortHandNames
-
-```go
-func GetArtifactShortHandNames() []string
-```
-
-GetArtifactShortHandNames returns shorthands for buildable stages
-
-## func addShorthandToStage
-
-```go
-func addShorthandToStage(artifact string, stage *Stage)
-```
-
-addShorthandToStage adds a build shorthand into the stage and ensures that required dependencies are correctly ordered\. Ordering assumptions: 1\. Base builds 2\. Basic Kola Tests 3\. Metal and Live ISO images 4\. Metal and Live ISO testing 5\. Cloud stages
-
-## func cosaBuildCmd
-
-```go
-func cosaBuildCmd(b string, js *JobSpec) ([]string, error)
-```
-
-cosaBuildCmd checks if b is a buildable artifact type and then returns the command to build it\.
-
-## func init
-
-```go
-func init()
-```
-
-## func isBaseArtifact
-
-```go
-func isBaseArtifact(artifact string) bool
-```
-
-isBaseArtifact is a check function for determining if an artifact is built by the base stage\.
-
-## func isValidArtifactShortHand
-
-```go
-func isValidArtifactShortHand(a string) bool
-```
-
-isValidArtifactShortHand checks if the shorthand is valid\.
-
-## func strPtr
-
-```go
-func strPtr(s string) *string
-```
-
-strPtr is a helper for returning a string pointer
-
-## type Aliyun
-
-Aliyun is nested under CloudsCfgs and describes where the Aliyun/Alibaba artifacts should be uploaded to\.
-
-```go
-type Aliyun struct {
- Bucket string `yaml:"bucket,omitempty" json:"bucket,omitempty"`
- Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
- Public bool `yaml:"public,omitempty" json:"public,omitempty"`
- Regions []string `yaml:"regions,omitempty" json:"regions,omitempty"`
-}
-```
-
-### func \(\*Aliyun\) GetPublishCommand
-
-```go
-func (a *Aliyun) GetPublishCommand(buildID string) (string, error)
-```
-
-GetPublishCommand returns the cosa upload command for Aliyun
-
-## type Archives
-
-Archives describes the location of artifacts to push to\. Brew: a nested Brew struct\. S3: publish to S3\.
-
-```go
-type Archives struct {
- Brew *Brew `yaml:"brew,omitempty" json:"brew,omitempty"`
- S3 *S3 `yaml:"s3,omitempty" json:"s3,omitempty"`
-}
-```
-
-## type Artifacts
-
-Artifacts describes the expected build outputs\. All: names of all the artifacts\. Primary: non\-cloud builds\. Clouds: cloud publication stages\.
-
-```go
-type Artifacts struct {
- All []string `yaml:"all,omitempty" json:"all,omitempty"`
- Primary []string `yaml:"primary,omitempty" json:"primary,omitempty"`
- Clouds []string `yaml:"clouds,omitempty" json:"clouds,omitempty"`
-}
-```
-
-## type Aws
-
-```go
-type Aws struct {
- Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
- AmiPath string `yaml:"ami_path,omitempty" json:"ami_path,omitempty"`
- Geo string `yaml:"geo,omitempty" json:"geo,omitempty"`
- GrantUser []string `yaml:"grant_user,omitempty" json:"grant_user,omitempty"`
- GrantUserSnapshot []string `yaml:"grant_user_snapshot,omitempty" json:"grant_user_snapshot,omitempty"`
- Public bool `yaml:"public,omitempty" json:"public,omitempty"`
- Regions []string `yaml:"regions,omitempty" json:"regions,omitempty"`
-}
-```
-
-### func \(\*Aws\) GetPublishCommand
-
-```go
-func (a *Aws) GetPublishCommand(buildID string) (string, error)
-```
-
-GetPublishCommand returns the cosa upload command for Aws
-
-## type Azure
-
-Azure describes upload options for Azure images\. Enabled: upload if true\. ResourceGroup: the name of the Azure resource group\. StorageAccount: name of the storage account\. StorageContainer: name of the storage container\. StorageLocation: name of the Azure region\, i\.e\. us\-east\-1\.
-
-```go
-type Azure struct {
- Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
- ResourceGroup string `yaml:"resource_group,omitempty" json:"resource_group,omitempty"`
- StorageAccount string `yaml:"storage_account,omitempty" json:"storage_account,omitempty"`
- StorageContainer string `yaml:"storage_container,omitempty" json:"storage_container,omitempty"`
- StorageLocation string `yaml:"storage_location,omitempty" json:"storage_location,omitempty"`
- Force bool `yaml:"force,omitempty" json:"force,omitempty"`
-}
-```
-
-### func \(\*Azure\) GetPublishCommand
-
-```go
-func (a *Azure) GetPublishCommand(buildID string) (string, error)
-```
-
-GetPublishCommand returns the cosa upload command for Azure
-
-## type Brew
-
-Brew is the RHEL Koji instance for storing artifacts\. Principle: the Kerberos user\. Profile: the profile to use\, i\.e\. brew\-testing\. Tag: the Brew tag to tag the build as\.
-
-```go
-type Brew struct {
- Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
- Principle string `yaml:"principle,omitempty" json:"principle,omitempty"`
- Profile string `yaml:"profile,omitempty" json:"profile,omitempty"`
- Tag string `yaml:"tag,omitempty" json:"tag,omitempty"`
-}
-```
-
-## type Cloud
-
-```go
-type Cloud interface {
- GetPublishCommand(string) (string, error)
-}
-```
-
-## type CloudsCfgs
-
-CloudsCfgs \(yes\, Clouds\) is a nested struct of all supported cloud configurations\.
-
-```go
-type CloudsCfgs struct {
- Aliyun *Aliyun `yaml:"aliyun,omitempty" json:"aliyun,omitempty"`
- Aws *Aws `yaml:"aws,omitempty" json:"aws,omitempty"`
- AwsCn *Aws `yaml:"aws-cn,omitempty" json:"aws-cn,omitempty"`
- Azure *Azure `yaml:"azure,omitempty" json:"azure,omitempty"`
- Gcp *Gcp `yaml:"gcp,omitempty" json:"gcp,omitempty"`
-}
-```
-
-### func \(\*CloudsCfgs\) GetCloudCfg
-
-```go
-func (c *CloudsCfgs) GetCloudCfg(cloud string) (Cloud, error)
-```
-
-GetCloudCfg returns the cloud configuration matching the given name from the jobspec\. Since omitempty is used when unmarshaling\, some objects will not be available\.
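-
-A sketch of looking up one cloud through the `Cloud` interface and rendering its publication command (the import path and build ID are illustrative):
-
-```go
-package main
-
-import (
-    "fmt"
-    "log"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    js, err := spec.JobSpecFromFile("jobspec.yaml")
-    if err != nil {
-        log.Fatal(err)
-    }
-    // This errors if the aws section was omitted from the jobspec (omitempty).
-    cloud, err := js.CloudsCfgs.GetCloudCfg("aws")
-    if err != nil {
-        log.Fatal(err)
-    }
-    cmd, err := cloud.GetPublishCommand("41.20210101.dev.0") // illustrative build ID
-    if err != nil {
-        log.Fatal(err)
-    }
-    fmt.Println(cmd)
-}
-```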
-
-## type Gcp
-
-Gcp describes deploying to the GCP environment\. Bucket: name of the GCP bucket to store the image in\. CreateImage: whether or not to create an image in GCP after upload\. Deprecated: if the image should be marked as deprecated\. Description: the description that should be attached to the image\. Enabled: when true\, publish to GCP\. Family: GCP image family to attach the image to\. License: the licenses that should be attached to the image\. LogLevel: log level\-\-DEBUG\, WARN\, INFO\. Project: name of the GCP project to use\. Public: if the image should be given public ACLs\.
-
-```go
-type Gcp struct {
- Bucket string `yaml:"bucket,omitempty" json:"bucket,omitempty"`
- CreateImage bool `yaml:"create_image" json:"create_image"`
- Deprecated bool `yaml:"deprecated" json:"deprecated"`
- Description string `yaml:"description" json:"description"`
- Enabled bool `yaml:"enabled,omitempty" json:"enabled,omitempty"`
- Family string `yaml:"family" json:"family"`
- License []string `yaml:"license" json:"license"`
- LogLevel string `yaml:"log_level" json:"log_level"`
- Project string `yaml:"project,omitempty" json:"project,omitempty"`
- Public bool `yaml:"public,omitempty" json:"public,omitempty"`
-}
-```
-
-### func \(\*Gcp\) GetPublishCommand
-
-```go
-func (g *Gcp) GetPublishCommand(buildID string) (string, error)
-```
-
-GetPublishCommand returns the cosa upload command for GCP
-
-## type Job
-
-Job refers to the Jenkins options\. BuildName: i\.e\. rhcos\-4\.7\. IsProduction: enforce KOLA tests\. StrictMode: only run explicitly defined stages\. VersionSuffix: name to append\, i\.e\. devel\.
-
-```go
-type Job struct {
- BuildName string `yaml:"build_name,omitempty" json:"build_name,omitempty"`
- IsProduction bool `yaml:"is_production,omitempty" json:"is_production,omitempty"`
- StrictMode bool `yaml:"strict,omitempty" json:"strict,omitempty"`
- VersionSuffix string `yaml:"version_suffix,omitempty" json:"version_suffix,omitempty"`
- // ForceArch forces a specific architecture.
- ForceArch string `yaml:"force_arch,omitempty" json:"force_arch,omitempty"`
- // Unexported minio values (run-time options)
- MinioCfgFile string // not exported
-
- // Runtime config options for SSH. Not exported for safety.
- MinioSSHForward string
- MinioSSHUser string
- MinioSSHKey string
-}
-```
-
-## type JobSpec
-
-JobSpec is the root\-level item for the JobSpec\.
-
-```go
-type JobSpec struct {
- Archives Archives `yaml:"archives,omitempty" json:"archives,omitempty"`
- CloudsCfgs CloudsCfgs `yaml:"clouds_cfgs,omitempty" json:"clouds_cfgs,omitempty"`
- Job Job `yaml:"job,omitempty" json:"job,omitempty"`
- Recipe Recipe `yaml:"recipe,omitempty" json:"recipe,omitempty"`
- Spec Spec `yaml:"spec,omitempty" json:"spec,omitempty"`
-
- // PublishOscontainer is a list of push locations for the oscontainer
- PublishOscontainer PublishOscontainer `yaml:"publish_oscontainer,omitempty" json:"publish_oscontainer,omitempty"`
-
- // Stages are specific stages to be run. Stages are
- // only supported by Gangplank; they do not appear in the
- // Groovy Jenkins Scripts.
- Stages []Stage `yaml:"stages" json:"stages"`
-
- // DelayedMetaMerge ensures that 'cosa build' is called with
- // --delayed-meta-merge
- DelayedMetaMerge bool `yaml:"delay_meta_merge" json:"delay_meta_merge,omitempty"`
-}
-```
-
-### func JobSpecFromFile
-
-```go
-func JobSpecFromFile(f string) (j JobSpec, err error)
-```
-
-JobSpecFromFile returns a JobSpec read from a file\.
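-
-A minimal sketch of loading a jobspec and echoing it back as YAML (the import path is an assumption):
-
-```go
-package main
-
-import (
-    "log"
-    "os"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    js, err := spec.JobSpecFromFile(spec.DefaultJobSpecFile)
-    if err != nil {
-        log.Fatal(err)
-    }
-    // Round-trip the parsed jobspec to stdout for inspection.
-    if err := js.WriteYAML(os.Stdout); err != nil {
-        log.Fatal(err)
-    }
-}
-```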
-
-### func JobSpecFromRepo
-
-```go
-func JobSpecFromRepo(url, ref, specFile string) (JobSpec, error)
-```
-
-JobSpecFromRepo clones a git repo and returns the jobspec and error\.
-
-### func JobSpecReader
-
-```go
-func JobSpecReader(in io.Reader) (j JobSpec, err error)
-```
-
-JobSpecReader takes an io\.Reader and returns the JobSpec and an error\.
-
-### func \(\*JobSpec\) AddCliFlags
-
-```go
-func (js *JobSpec) AddCliFlags(cmd *pflag.FlagSet)
-```
-
-AddCliFlags adds the JobSpec flags to the pflag set for use in the CLI\.
-
-### func \(\*JobSpec\) AddRepos
-
-```go
-func (js *JobSpec) AddRepos()
-```
-
-AddRepos adds any repositories defined on the CLI\.
-
-### func \(\*JobSpec\) GenerateStages
-
-```go
-func (j *JobSpec) GenerateStages(fromNames, testNames []string, singleStage bool) error
-```
-
-GenerateStages creates stages\.
-
-### func \(\*JobSpec\) GetStage
-
-```go
-func (j *JobSpec) GetStage(id string) (*Stage, error)
-```
-
-GetStage returns the stage with the matching ID
-
-### func \(\*JobSpec\) WriteJSON
-
-```go
-func (js *JobSpec) WriteJSON(w io.Writer) error
-```
-
-WriteJSON writes the jobspec as JSON\.
-
-### func \(\*JobSpec\) WriteYAML
-
-```go
-func (js *JobSpec) WriteYAML(w io.Writer) error
-```
-
-WriteYAML writes the jobspec as YAML\.
-
-## type Override
-
-Override describes RPMs or Tarballs to include as an override in the OSTree compose\.
-
-```go
-type Override struct {
- // URI is a string prefixed with "file://" or "http(s)://" and a path.
- URI string `yaml:"uri,omitempty" json:"uri,omitempty"`
-
- // Rpm indicates that the file is RPM and should be placed in overrides/rpm
- Rpm *bool `yaml:"rpm,omitempty" json:"rpm,omitempty"`
-
- // Tarball indicates that the file is a tarball and will be extracted to overrides.
- Tarball *bool `yaml:"tarball,omitempty" json:"tarball,omitempty"`
-
- // Tarball type is an override Tarball type
- TarballType *string `yaml:"tarball_type,omitempty" json:"tarball_type,omitempty"`
-}
-```
-
-### func \(\*Override\) Fetch
-
-```go
-func (o *Override) Fetch(l *log.Entry, path string, wf TarDecompressorFunc) error
-```
-
-Fetch reads the source and writes it to disk\. The decompressor function is likely lazy\, but allows for testing\.
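-
-Because the decompressor is injectable, tests can pass a stand-in; a sketch (import path assumed):
-
-```go
-package main
-
-import (
-    "io"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-// discardDecompressor satisfies TarDecompressorFunc but drains the
-// stream instead of unpacking it, which is handy in unit tests.
-var discardDecompressor spec.TarDecompressorFunc = func(r io.ReadCloser, dest string) error {
-    defer r.Close()
-    _, err := io.Copy(io.Discard, r)
-    return err
-}
-
-func main() {
-    _ = discardDecompressor // would be passed to (*Override).Fetch in real code
-}
-```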
-
-### func \(\*Override\) writePath
-
-```go
-func (o *Override) writePath(basePath string) (string, error)
-```
-
-writePath gets the path that the file should be extracted to\.
-
-## type PublishOscontainer
-
-PublishOscontainer describes where to push the OSContainer to\.
-
-```go
-type PublishOscontainer struct {
- // BuildStrategyTLSVerify indicates whether to verify TLS certificates when pushing as part of an OCP Build Strategy.
- // By default, TLS verification is turned on.
- BuildStrategyTLSVerify *bool `yaml:"buildstrategy_tls_verify" json:"buildstrategy_tls_verify"`
-
- // Registries is a list of locations to push to.
- Registries []Registry `yaml:"registries" json:"registries"`
-}
-```
-
-## type PushSecretType
-
-PushSecretType describes the type of push secret\.
-
-```go
-type PushSecretType string
-```
-
-## type Recipe
-
-Recipe describes where to get the build recipe/config\, i\.e\. fedora\-coreos\-config\. GitRef: branch/ref to fetch from\. GitURL: url of the repo\.
-
-```go
-type Recipe struct {
- GitRef string `yaml:"git_ref,omitempty" json:"git_ref,omitempty"`
- GitURL string `yaml:"git_url,omitempty" json:"git_url,omitempty"`
- Repos []*Repo `yaml:"repos,omitempty" json:"repos,omitempty"`
-}
-```
-
-## type Registry
-
-Registry describes the push locations\.
-
-```go
-type Registry struct {
- // URL is the location that the oscontainer should be pushed to.
- URL string `yaml:"url" json:"url"`
-
- // TLSVerify tells whether to verify TLS. By default, it's true.
- TLSVerify *bool `yaml:"tls_verify,omitempty" json:"tls_verify,omitempty"`
-
- // SecretType is the type of secret to expect; it should be one of the PushSecretType* values.
- SecretType PushSecretType `yaml:"secret_type,omitempty" json:"secret_type,omitempty"`
-
- // If the secret is inline, the string data, else, the cluster secret name
- Secret string `yaml:"secret,omitempty" json:"secret,omitempty"`
-}
-```
-
-## type RenderData
-
-RenderData is used to render commands
-
-```go
-type RenderData struct {
- JobSpec *JobSpec
- Meta *cosa.Build
-}
-```
-
-### func \(\*RenderData\) ExecuteTemplateFromString
-
-```go
-func (rd *RenderData) ExecuteTemplateFromString(s ...string) ([]string, error)
-```
-
-ExecuteTemplateFromString renders one or more template strings and returns the resulting strings\.
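-
-A sketch of rendering a jobspec-templated command (the import path is an assumption):
-
-{% raw %}
-```go
-package main
-
-import (
-    "fmt"
-    "log"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    js, err := spec.JobSpecFromFile("jobspec.yaml")
-    if err != nil {
-        log.Fatal(err)
-    }
-    rd := spec.RenderData{JobSpec: &js}
-    out, err := rd.ExecuteTemplateFromString(
-        "cosa buildfetch --url=s3://{{.JobSpec.Archives.S3.Bucket}}/{{.JobSpec.Archives.S3.Path}}")
-    if err != nil {
-        log.Fatal(err)
-    }
-    fmt.Println(out[0]) // one rendered string per input, per the docs above
-}
-```
-{% endraw %}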
-
-### func \(\*RenderData\) ExecuteTemplateToWriter
-
-```go
-func (rd *RenderData) ExecuteTemplateToWriter(in io.Reader, out io.Writer) error
-```
-
-ExecuteTemplateToWriter renders an io\.Reader to an io\.Writer\.
-
-### func \(\*RenderData\) RendererExecuter
-
-```go
-func (rd *RenderData) RendererExecuter(ctx context.Context, env []string, scripts ...string) error
-```
-
-RendererExecuter renders a script with templates and then executes it
-
-### func \(\*RenderData\) executeTemplate
-
-```go
-func (rd *RenderData) executeTemplate(r io.Reader) ([]byte, error)
-```
-
-executeTemplate applies the template to r\.
-
-## type Repo
-
-Repo is a yum/dnf repository to use as an installation source\.
-
-```go
-type Repo struct {
- Name string `yaml:"name,omitempty" json:"name,omitempty"`
-
- // URL indicates that the repo file is remote
- URL *string `yaml:"url,omitempty" json:"url,omitempty"`
-
- // Inline indicates that the repo file is inline
- Inline *string `yaml:"inline,omitempty" json:"inline,omitempty"`
-}
-```
-
-### func \(\*Repo\) Writer
-
-```go
-func (r *Repo) Writer(path string) (string, error)
-```
-
-Writer places the remote repo file into path\. If the repo has no name\, then a SHA256 of the URL will be used\. Returns the path of the file and an error\.
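-
-A sketch of writing an inline repo definition (import path assumed):
-
-```go
-package main
-
-import (
-    "fmt"
-    "log"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    inline := "[repo1]\nbaseurl=https://example.com/repo\nenabled=1\n"
-    r := spec.Repo{Name: "repo1", Inline: &inline}
-    // Write the repo file under the target directory; the returned
-    // path includes the file name derived from the repo's name.
-    path, err := r.Writer("/srv/src/config/yum.repos.d")
-    if err != nil {
-        log.Fatal(err)
-    }
-    fmt.Println("wrote", path)
-}
-```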
-
-## type S3
-
-S3 describes the location of the S3 resource\. ACL: the s3 acl to use\, usually 'private' or 'public'\. Bucket: name of the S3 bucket\. Path: the path inside the bucket\.
-
-```go
-type S3 struct {
- ACL string `yaml:"acl,omitempty" envVar:"S3_ACL" json:"acl,omitempty"`
- Bucket string `yaml:"bucket,omitempty" envVar:"S3_BUCKET" json:"bucket,omitempty"`
- Path string `yaml:"path,omitempty" envVar:"S3_PATH" json:"path,omitempty"`
-}
-```
-
-## type Spec
-
-Spec describes the RHCOS JobSpec\. GitRef: branch/ref to fetch from\. GitURL: url of the repo\.
-
-```go
-type Spec struct {
- GitRef string `yaml:"git_ref,omitempty" json:"git_ref,omitempty"`
- GitURL string `yaml:"git_url,omitempty" json:"git_url,omitempty"`
-}
-```
-
-## type Stage
-
-Stage is a single stage\.
-
-```go
-type Stage struct {
- ID string `yaml:"id,omitempty" json:"id,omitempty"`
- Description string `yaml:"description,omitempty" json:"description,omitempty"`
- ConcurrentExecution bool `yaml:"concurrent,omitempty" json:"concurrent,omitempty"`
-
- // DirectExec signals that the command should not be written
- // to a file. Rather, the command should be directly executed.
- DirectExec bool `yaml:"direct_exec,omitempty" json:"direct_exec,omitempty"`
-
- // NotBlocking means that the stage does not block another stage
- // from starting execution (i.e. concurrent stage).
- NotBlocking bool `yaml:"not_blocking,omitempty" json:"not_blocking,omitempty"`
-
- // RequireArtifacts lists the names of the required artifacts. If a
- // required artifact is missing (per the meta.json), the stage
- // will not be executed. RequireArtifacts _implies_ sending builds/builds.json
- // and builds//meta.json.
- RequireArtifacts []string `yaml:"require_artifacts,flow,omitempty" json:"require_artifacts,omitempty"`
-
- // RequestArtifacts are files that are provided if they are there. Examples include
- // 'caches' for `/srv/cache` and `/srv/tmp/repo` tarballs or `ostree` which are really useful
- // for base builds.
- RequestArtifacts []string `yaml:"request_artifacts,flow,omitempty" json:"request_artifacts,omitempty"`
-
- // BuildArtifacts produces "known" artifacts. The special "base"
- // will produce an OSTree and QCOWs.
- BuildArtifacts []string `yaml:"build_artifacts,flow,omitempty" json:"build_artifacts,omitempty"`
-
- // Commands are arbitrary commands run after an Artifact builds.
- // Instead of running `cosa buildextend-?` as a command, it's preferable
- // to use the bare name in BuildArtifact.
- Commands []string `yaml:"commands,flow,omitempty" json:"commands,omitempty"`
-
- // PublishArtifacts will upload defined BuildArtifacts to the cloud providers
- PublishArtifacts []string `yaml:"publish_artifacts,omitempty" json:"publish_artifacts,omitempty"`
-
- // PrepCommands are run before Artifact builds, while
- // PostCommands are run after. Prep and Post Commands are run serially.
- PrepCommands []string `yaml:"prep_commands,flow,omitempty" json:"prep_commands,omitempty"`
- PostCommands []string `yaml:"post_commands,flow,omitempty" json:"post_commands,omitempty"`
-
- // PostAlways ensures that the PostCommands are always run.
- PostAlways bool `yaml:"post_always,omitempty" json:"post_always,omitempty"`
-
- // ExecutionOrder is a number value that defines the order of stages. If two stages
- // share the same execution order number, then they are allowed to run concurrently to each other.
- ExecutionOrder int `yaml:"execution_order,omitempty" json:"execution_order,omitempty"`
-
- // ReturnCache returns a tarball of `/srv/cache`, while RequireCache ensures the tarball
- // is fetched and unpacked into `/srv/cache`. RequestCache is a non-blocking, optional version
- // of RequireCache.
- ReturnCache bool `yaml:"return_cache,omitempty" json:"return_cache,omitempty"`
- RequireCache bool `yaml:"require_cache,omitempty" json:"require_cache,omitempty"`
- RequestCache bool `yaml:"request_cache,omitempty" json:"request_cache,omitempty"`
-
- // ReturnCacheRepo returns a tarball of `/srv/repo`, while RequireCacheRepo ensures the
- // tarball is fetched and unpacked into `/srv/repo`. RequestCacheRepo is a non-blocking, optional
- // version of RequireCacheRepo
- ReturnCacheRepo bool `yaml:"return_cache_repo,omitempty" json:"return_cache_repo,omitempty"`
- RequireCacheRepo bool `yaml:"require_cache_repo,omitempty" json:"require_cache_repo,omitempty"`
- RequestCacheRepo bool `yaml:"request_cache_repo,omitempty" json:"request_cache_repo,omitempty"`
-
- // KolaTests are shorthands for testing.
- KolaTests []string `yaml:"kola_tests,omitempty" json:"kola_tests,omitempty"`
-
- // Overrides is a list of Overrides to apply to the OS tree
- Overrides []Override `yaml:"overrides,omitempty" json:"overrides,omitempty"`
-}
-```
-
-### func \(\*Stage\) AddCommands
-
-```go
-func (s *Stage) AddCommands(args []string)
-```
-
-AddCommands adds commands to a stage
-
-### func \(\*Stage\) AddRequires
-
-```go
-func (s *Stage) AddRequires(args []string)
-```
-
-AddRequires adds required artifacts based on the artifacts that a stage requires\, in consideration of what the stage builds\.
-
-### func \(\*Stage\) DeepCopy
-
-```go
-func (s *Stage) DeepCopy() (Stage, error)
-```
-
-DeepCopy does a lazy deep copy by rendering the stage to JSON and then returning a new Stage defined by the JSON
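-
-The technique in miniature (a sketch; import path assumed):
-
-```go
-package main
-
-import (
-    "encoding/json"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-// deepCopyStage mirrors the JSON round-trip described above: marshal
-// the stage, then unmarshal into a fresh value. Only exported fields
-// survive the trip.
-func deepCopyStage(in spec.Stage) (spec.Stage, error) {
-    var out spec.Stage
-    buf, err := json.Marshal(in)
-    if err != nil {
-        return out, err
-    }
-    err = json.Unmarshal(buf, &out)
-    return out, err
-}
-
-func main() {}
-```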
-
-### func \(\*Stage\) Execute
-
-```go
-func (s *Stage) Execute(ctx context.Context, rd *RenderData, envVars []string) error
-```
-
-Execute runs the commands of a stage\.
-
-### func \(\*Stage\) getCommands
-
-```go
-func (s *Stage) getCommands(rd *RenderData) ([]string, error)
-```
-
-getCommands renders the automatic artifacts and publication commands
-
-### func \(\*Stage\) getPostCommands
-
-```go
-func (s *Stage) getPostCommands(rd *RenderData) ([]string, error)
-```
-
-getPostCommands generates the post commands from a synthesis of pre\-defined post commands\, kola tests and the cloud publication steps\.
-
-### func \(\*Stage\) getPublishCommands
-
-```go
-func (s *Stage) getPublishCommands(rd *RenderData) ([]string, error)
-```
-
-getPublishCommands returns the cloud publication commands\.
-
-## type TarDecompressorFunc
-
-TarDecompressorFunc is a function that handles decompressing a file\.
-
-```go
-type TarDecompressorFunc func(io.ReadCloser, string) error
-```
-
-## type kolaTests
-
-```go
-type kolaTests map[string]Stage
-```
-
-
-
-Generated by [gomarkdoc](https://github.com/princjef/gomarkdoc)
diff --git a/docs/gangplank/usage.md b/docs/gangplank/usage.md
deleted file mode 100644
index 416a27aa405b23bfd7c716b596224c962158a9a1..0000000000000000000000000000000000000000
--- a/docs/gangplank/usage.md
+++ /dev/null
@@ -1,436 +0,0 @@
-# Using Gangplank
-
-Gangplank's sole purpose in life is to codify the knowledge of building CoreOS variants and CoreOS-like operating systems using CoreOS Assembler. Gangplank knows how to create the environment, execute CoreOS Assembler, and coordinate artifacts.
-
-## Terms
-
-- OpenShift: Red Hat's Kubernetes Platform
-- BuildConfig: a Custom Resource Definition (CRD) used by OpenShift that builds containers and other build artifacts
-- Unbound Pod: Any instance of Gangplank that is running outside the context of an OpenShift BuildConfig
-
-## Design Idea
-
-Gangplank's core design principle is that containers are the most suitable modern method of orchestrating builds. Gangplank grew out of the various Jenkins libraries and scripts codifying the execution of various versions of COSA.
-
-Gangplank only knows _how to run COSA_, but running COSA does not require Gangplank. Today it understands how to:
-
-- Run on generic Kubernetes version 1.17+ and OpenShift versions 3.11 and 4.x as an "unbound pod"
-- Be used as an OpenShift BuildConfig
-- Execute locally using podman
-- Read meta.json and Jobspec files
-
-### Menu vs Buffet
-
-Gangplank, with the exception of local podman mode, is intended to run in the CoreOS Assembler container. Prior to Gangplank, a considerable amount of time was spent keeping pipelines, JobSpecs and COSA code-bases in sync. Gangplank seeks to eliminate the mismatch by being part of CoreOS Assembler. Once started, Gangplank will be re-executed as a new pod that has suitable permissions and resources to build a CoreOS variant. When running on OpenShift or Kubernetes, Gangplank will use its pod specification to create worker pods. In other words, Gangplank is tightly coupled to its corresponding COSA container.
-
-The origin pod (the first instance of Gangplank) handles the orchestration of workers by:
-- parsing the environment
-- reading the jobspec
-- creating child worker pod definitions
-- sending work to worker pods and waiting for completion of work
-- life-cycle operations (create/delete/etc) for workers
-
-Previous build systems have used Jenkins Kubernetes plugins for the pod creation and life-cycling of the worker pods. The problem with that approach is that each OpenShift/Kubernetes environment would have unique differences that caused pipeline drift. For example, the production pipeline for RHCOS uses a different set of secret names than the development location.
-
-Gangplank, therefore, evaluates its environment to determine the mode of the build.
-
-*NOTE: When running in a Kubernetes/OpenShift cluster, Gangplank requires a service account that can read secrets AND create/delete pods.*
-
-## Execution Choices
-
-Gangplank has three execution modes, each targeted at a different use-case.
-
-### OpenShift BuildConfig
-
-Gangplank originally started as an OpenShift BuildConfig custom-build strategy. As a result, Gangplank uses the OpenShift BuildAPI v1 object definition for performing builds. When run as a BuildConfig, Gangplank can perform builds via `oc start-build`.
-
-The BuildConfig mode is intended for developer and re-build tasks.
-
-### Unbounded Pods on OpenShift or Kubernetes
-
-Gangplank will execute happily on a generic Kubernetes or OpenShift 3.11 (this requires an SCC privileged account, and worker nodes must have `/dev/kvm`) or OpenShift 4.5+ (must have access to the kube-virt labeled nodes).
-
-This mode of operation is called "unbounded" since the pod is not bound to a BuildConfig, and something else (such as CI) is coordinating the pod's life-cycle.
-
-Unbounded mode is targeted at Jenkins and other CI build systems.
-
-### Podman mode (for Developers)
-
-For the developer use-case, or even building on a virtual machine, Gangplank supports running as a `podman` privileged pod. In podman mode, Gangplank will create worker pods.
-
-This requires the `podman-remote` package to be installed and the socket enabled (via `systemctl --now enable podman.socket`).
-
-Example command:
-```
-$ gangplank pod \
-    --podman \
-    --workDir <dir> \
-    --spec <jobspec.yaml>
-```
-
-The following flags are optional:
-- `-A ` or `--artifact ` will build a specific artifact. When `-A` is provided, no jobspec is required.
-- `--srvDir` directly exposes `/srv` to the pod. If this is not defined, then Gangplank will create an ephemeral working `/srv` which will be cleaned up on exit
-- `setWorkDirCtx` will set the proper SELinux permissions for `--workDir` and `--srvDir`
-
-If `--workDir` is defined, the build output will be emitted to `/builds`.
-
-*btrfs warning*: Gangplank can run multiple pods at a single time. When done on `btrfs`, the clean-up can be ridiculously slow or can hang. If you are building on `btrfs` (the default for Fedora Workstation 33+), it is recommended that you turn off copy-on-write (COW) on the directory and use it as the `--workDir` if using parallel stages. Example:
-```
-mkdir ~/workdir
-chattr +C ~/workdir
-gangplank pod --workDir ~/workdir
-```
-
-If an external minio server is used (configured via a file such as `minio.cfg`), then add `-m minio.cfg` to all other Gangplank commands. Gangplank further supports the use of S3-compatible object stores (i.e. AWS) via the `-m` directive. Gangplank uses the object store backend for reading files and discovery of requirements.
-
-Regardless of where the pod is being run, Gangplank will stream logs from the worker pods. If the supervising Gangplank is terminated, the workers are terminated.
-
-All meta-data that is found will be provided to the workers. `kola` test results, logs, new meta-data, and any new artifacts generated are returned to the origin Gangplank.
-
-### Build Short-hands
-
-To support distributed builds, Gangplank has two special build short-hands of "base" and "finalize":
-
-```yaml
-stages:
- - id: base
- build_artifacts:
- - base
- - finalize
-```
-
-The `base` short-hand corresponds to `cosa build --delay-meta-merge`, while `finalize` corresponds to `cosa meta --finalize`. By default, `cosa build` uses the `delay-meta-merge` since Gangplank is performing a distributed build. In general `finalize` should be the last stage.
-
-### JobSpec
-
-The JobSpec (or Job Specification) is simply YAML that instructs Gangplank on the steps and dependencies for starting a build.
-
-To get started with a JobSpec, you can generate one using Gangplank via `gangplank generate -A base`
-
-Example spec:
-```
-$ bin/gangplank generate -A base
-INFO[0000] Gangplank: COSA OpenShift job runner, 2021-03-02.9dce8136~dirty
-# Generated by Gangplank CLI
-# 2021-03-02T17:25:42-07:00
-job:
- strict: true
-
-minio:
- bucket: builder
-
-recipe:
- git_ref: "release-4.8"
- git_url: https://github.com/openshift/os
- repos:
- # Inline repos are defined in the jobspec
- - name: repos
- inline: |
-
- # URL should reference a file with repository definition(s)
- - name: repo1
- url: https://example.com/repo/repo.file
-
-# publish_oscontainer describes locations to push the oscontainer to.
-publish_oscontainer:
- # TLS verification for build strategy builds. Defaults to true
- # Push registry comes from the build.openshift.io's build.spec.output
- # specification.
- buildstrategy_tls_verify: true
-
- # list of push locations to push the oscontainer to.
- registries:
- # push to a cluster address using a service account token
- # to login to the registry (only useful in cluster)
- - url: "first.registry.example.com/stream/name:tag"
- secret_type: token
- tls_verify: false
-
- # push with an inline secret
- - url: "second.registry.example.com/stream/name:tag",
- secret_type: inline
- secret: ""
-
- # push using an in-cluster secret named "builder-secret"
- # the service account running Gangplank will need to be
- # able to read the secret
- - url: "third.registry.exmaple.com/stream/name:tag",
- secret_type: cluster
- secret: builder-secret
-
-stages:
-- id: ExecOrder 1 Stage
- description: Stage 1 execution base
- build_artifacts: [base]
- execution_order: 1
- request_cache: true
- request_cache_repo: true
-
-- id: ExecOrder 5 Stage
- description: Stage 5 execution aws
- require_artifacts: [qemu]
- build_artifacts: [aws]
- execution_order: 5
-
-- id: ExecOrder 5 Stage
- description: Stage 5 execution gcp
- require_artifacts: [qemu]
- build_artifacts: [gcp]
- execution_order: 5
-
-delay_meta_merge: true
-
-```
-
-The JobSpec defines discrete units of work as a "stage". Each stage supports a few options:
-
-- id: the name of the stage; must be unique
-- command: a list of commands to execute
-- concurrent: bool to indicate if the `commands` can be executed concurrently
-- build_artifacts: known artifacts to build
-- direct_execution: do not apply templating
-- prep_commands: a list of commands to run before command
-- post_commands: a list of commands to run last (such as test or cleanup)
-- post_always: a bool that indicates whether the `post_commands` should _always_ be executed regardless of the success of the `commands` stage.
-- require_artifacts: the type of artifact that's required for work (i.e. `qemu` or `aws`). Stages will not start until the artifact appears.
-- request_artifacts: a list of optional artifacts that would be _nice_ to have, but are not blocking.
-- {return,require,request}_{cache,cache_repo}: bool values that indicate whether to return, require, or request a tarball of the cache (`/srv/cache`) or the cache repo (`/srv/tmp/repo`).
-
-To illustrate this, consider:
-```yaml
-
-- id: ExecOrder 1 Stage
- description: Stage 1 execution base
- build_artifacts: [base]
- execution_order: 1
- request_cache: true
- request_cache_repo: true
-
-- id: ExecOrder 5 Stage
- description: Stage 5 execution aws
- require_artifacts: [qemu]
- build_artifacts: [aws]
- execution_order: 5
-
-- id: ExecOrder 5 Stage
- description: Stage 5 execution gcp
- require_artifacts: [qemu]
- build_artifacts: [gcp]
- execution_order: 5
-
-- id: ExecOrder 999 Stage
- description: Stage 999 execution finalize
- build_artifacts: [finalize]
- execution_order: 999
-
-```
-
-In this example:
-
-1. In the base stage, Gangplank will provide `/srv/cache` and `/srv/tmp/repo` from `cache/*` if the tarballs exist, and optionally provide the latest `oscontainer`. Gangplank will return the build artifacts and new cache tarballs.
-1. In the oscontainer stage, Gangplank will require the caches.
-1. In the `ExecOrder 5` stages, two new pods will concurrently build AWS and GCP but only after the QEMU artifact is found.
-1. The final `ExecOrder 999` stage will combine `meta.*.json` to produce a final `meta.json`.
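-
-The same kind of stage graph can also be generated programmatically through the Gangplank spec API; a sketch, assuming the (hypothetical) package import path:
-
-```go
-package main
-
-import (
-    "log"
-    "os"
-
-    spec "github.com/coreos/gangplank/spec" // hypothetical import path
-)
-
-func main() {
-    js, err := spec.JobSpecFromFile("jobspec.yaml")
-    if err != nil {
-        log.Fatal(err)
-    }
-    // Build base first, then aws and gcp, and run the basicQemu kola tests.
-    err = js.GenerateStages([]string{"base", "aws", "gcp"}, []string{"basicQemu"}, false)
-    if err != nil {
-        log.Fatal(err)
-    }
-    _ = js.WriteYAML(os.Stdout) // inspect the generated stages
-}
-```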
-
-### Meta Data and JobSpec Templating
-
-Gangplank was initially started after belatedly realizing that the Jenkins Pipelines are, in fact, complicated templating engines. That is, a considerable amount of time, energy and development was put into translating data from YAML/JSON into execution rules.
-
-Gangplank supports rendering commands from the `meta.json` in CoreOS Assembler and the JobSpec via Golang templates. The caveat, however, is that `meta.json` variables appear _after_ the base build. Generally speaking, this means inputs to a base build are defined in the Jobspec while artifacts generated from a base build may use both `meta.json` and the Jobspec.
-
-#### JobSpec Example
-
-{% raw %}
-Any JobSpec variable is exposed using the GoLang templating `{{.JobSpec.}}`
-
-```
-archives:
- s3:
- bucket: darkarts
- path: magicalmysteries
-stages:
- - id: prep
- command:
- - cosa buildfetch --url=s3://{{.JobSpec.Archives.S3.Bucket}}/{{.JobSpec.Archives.S3.Path}}
-```
-{% endraw %}
-
-The above example will run the CoreOS Assembler command `cosa buildfetch --url=s3://darkarts/magicalmysteries`.
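-
-Under the hood this is ordinary Go `text/template` rendering; a self-contained sketch of the same substitution (the struct shapes here are illustrative stand-ins, not Gangplank's actual types):
-
-{% raw %}
-```go
-package main
-
-import (
-    "os"
-    "text/template"
-)
-
-// Illustrative stand-ins for the jobspec data.
-type S3 struct{ Bucket, Path string }
-type Archives struct{ S3 S3 }
-type JobSpec struct{ Archives Archives }
-type RenderData struct{ JobSpec JobSpec }
-
-func main() {
-    rd := RenderData{JobSpec: JobSpec{Archives: Archives{S3: S3{Bucket: "darkarts", Path: "magicalmysteries"}}}}
-    tmpl := template.Must(template.New("cmd").Parse(
-        "cosa buildfetch --url=s3://{{.JobSpec.Archives.S3.Bucket}}/{{.JobSpec.Archives.S3.Path}}\n"))
-    // Prints: cosa buildfetch --url=s3://darkarts/magicalmysteries
-    if err := tmpl.Execute(os.Stdout, rd); err != nil {
-        panic(err)
-    }
-}
-```
-{% endraw %}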
-
-#### meta.json
-
-`meta.json` fields become available for any stage that is executed after Gangplank detects a new `meta.json`. Data for a `meta.json` is prefixed using `.Meta`. `meta.json` is always read immediately before a stage is executed (if `meta.json` exists).
-
-{% raw %}
-```
-stages:
- - id: build
- build_artifacts:
- - base
- - id: make a file
- command:
- - touch {{ .Meta.BuildID }}
-```
-{% endraw %}
-
-### Templating Logic
-
-With the availability of GoLang templating, the possibility exists to do loops and to dynamically create commands. The following example publishes an AMI to all AWS regions.
-
-NOTE: It may be tempting to turn Gangplank into a complicated templating engine. Users would be well advised to consider whether the added complexity helps. In most cases, using simple, clear, and easy-to-understand templating logic is the better choice.
-
-{% raw %}
-```
-archives:
- s3:
- bucket: darkarts
- path: magicalmysteries
-clouds_cfgs:
- aws:
- amipath: foobar
- regions:
- - us-east-1
- - us-west-1
-stages:
- - id: publish to AWS
- commands:
- - upload-ami --build {{.Meta.BuildID}} --region {{index .JobSpec.CloudsCfgs.Aws.Regions 0}} --bucket=s3://{{.JobSpec.Archives.S3.Bucket}}/{{.JobSpec.Archives.S3.Path}}
- - cosa aws-replicate --build {{.Meta.BuildID}} --regions {{range .JobSpec.CloudsCfgs.Aws.Regions}}{{.}} {{end}}
-```
-{% endraw %}
-
-### The Schema
-
-CoreOS Assembler and Mantle (publication and testing for CoreOS-like operating systems) share a schema that understands `meta.json`. Gangplank only understands a few commands regarding the location of artifacts. When artifacts are added to, or removed from, the [CoreOS Assembler schema](../../src/schema/v1.json) Gangplank's support will change.
-
-Gangplank uses the schema for:
-
-- locating artifacts via their top level name (i.e. `qemu` or `metal4k`)
-- creating `cosa buildextend-*` commands
-- templating commands
-
-## Minio
-
-The choice of Minio was deliberate: it's an open-source S3-compatible object store that is lightweight and has Golang bindings. The use of Minio in the case of Gangplank is purely for coordinating files. Gangplank requires either Minio or access to an S3 object store.
-
-### Standalone mode
-
-If an external Minio/S3 server is not defined, Gangplank runs Minio from the directory defined as `--srvDir`. A new directory of "builder" (or whatever bucket you've chosen) will be created under the `--srvDir` parameter.
-
-### External mode
-
-Running Minio in external mode is relatively easy:
-- [Simple OpenShift Deployment](https://github.com/darkmuggle/minio-ocp)
-- [Minio's Official Kubernetes Documentation](https://docs.min.io/docs/deploy-minio-on-kubernetes.html)
-- Podman:
-```
-$ podman volume create minio
-$ podman create -p 9000 --name minio -v minio:/data \
- -e MINIO_ACCESS_KEY=key \
- -e MINIO_SECRET_ACCESS_KEY=key \
- docker.io/minio/minio:latest \
- server /data
-$ podman start minio
-```
-
-Gangplank understands how to use an external minio host via `-m config.json`, where `config.json` has the following format:
-```
-{
- "accesskey": "minioadmin",
- "secretkey": "minioadmin",
- "host": "192.168.3.9",
- "port": 9000,
- "external_server": true,
- "region": ""
-}
-```
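-
-For programmatic consumers, the file maps naturally onto a small struct; a sketch (the struct itself is illustrative, though the field names follow the format above):
-
-```go
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-    "os"
-)
-
-// minioCfg mirrors the documented config fields.
-type minioCfg struct {
-    AccessKey      string `json:"accesskey"`
-    SecretKey      string `json:"secretkey"`
-    Host           string `json:"host"`
-    Port           int    `json:"port"`
-    ExternalServer bool   `json:"external_server"`
-    Region         string `json:"region"`
-}
-
-func main() {
-    buf, err := os.ReadFile("config.json")
-    if err != nil {
-        panic(err)
-    }
-    var cfg minioCfg
-    if err := json.Unmarshal(buf, &cfg); err != nil {
-        panic(err)
-    }
-    fmt.Printf("endpoint: %s:%d\n", cfg.Host, cfg.Port)
-}
-```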
-
-Example of AWS Config:
-```
-{
- "accesskey": "",
- "secretkey": "",
- "host": "us-west-1.s3.amazonaws.com",
- "port": 443
- "external_server": true,
- "region": "us-west-1"
-}
-```
diff --git a/docs/kola.md b/docs/kola.md
index c319afb94a1ef252b47c715fb5dd3602500df0b9..60a3d941d22356ba54342017d6af2abdab7f4b91 100644
--- a/docs/kola.md
+++ b/docs/kola.md
@@ -10,7 +10,7 @@ Kola is a framework for testing software integration in CoreOS systems
across multiple platforms. It is primarily designed to operate within
the CoreOS Assembler for testing software that has landed in the OS image.
-Kola supports running tests on multiple platforms, currently QEMU, GCE,
+Kola supports running tests on multiple platforms, currently QEMU, GCP,
AWS, VMware VSphere, Packet, and OpenStack. In the future systemd-nspawn and
other platforms may be added.
Local platforms do not rely on access to the Internet as a design
@@ -25,9 +25,8 @@ inspection.
Kola is still under heavy development and it is expected that its
interface will continue to change.
-By default, kola uses the `qemu-unprivileged` platform with the most recently
-built image (assuming it is run from within a CoreOS Assembler working
-directory).
+By default, kola uses the `qemu` platform with the most recently built image
+(assuming it is run from within a CoreOS Assembler working directory).
1. TOC
{:toc}
@@ -55,6 +54,11 @@ and can also be used with glob patterns:
Tests specified in `src/config/kola-denylist.yaml` will also be skipped
regardless of whether the switch `--denylist-test` was provided.
+It's also possible to skip tests based on tags by prefixing
+the tag by `!`:
+
+`kola run --tag '!reprovision'`
+
Example format of the file:
```yaml
@@ -80,6 +84,9 @@ Example format of the file:
- pattern: test2.test
...
```
+
+The special pattern `skip-console-warnings` suppresses the default check for kernel errors on the console which would otherwise fail a test.
+
## kola list
The list command lists all of the available tests.
@@ -159,6 +166,16 @@ For more examples, look at the
suite of tests under kola. These tests were ported into kola and make
heavy use of the native code interface.
+## kola non-exclusive tests
+
+Some tests are lightweight and do not involve complex interactions like reboots
+and multiple machines. Tests that are not expected to conflict with other tests can be
+marked as "non-exclusive", so that they are run in the same VM to save resources.
+
+External tests can be marked as non-exclusive via kola.json or an inline tag.
+Note: tests compiled into kola (non-external tests) cannot be marked as non-exclusive.
+This is deliberate as tests compiled in kola should be complex and thus exclusive.
+
## Manhole
The `platform.Manhole()` function creates an interactive SSH session which can
@@ -171,3 +188,61 @@ automatically SSH into a machine when any `MustSSH` calls fail.
kolet is run on kola instances to run native functions in tests. Generally kolet
is not invoked manually.
+
+## More information on tests
+
+After you run a kola test, you can find more information about the run in `tmp/kola/`, such as the following log files. They will help you debug the problem and will certainly give you hints along the way.
+
+1. `journal.txt`
+2. `console.txt`
+3. `ignition.json`
+4. `journal-raw.txt.gz`
+
+## Extended artifacts
+
+1. Extended artifacts need additional forms of testing (you can pass the Ignition config and the path to the artifact you want to test)
+2. `cosa kola run -h` (this lets you see the commands yourself and what syntax is needed)
+3. `cosa buildextend-"name_of_artifact"` (an example of building an extended artifact)
+4. `kola run -p ` is the most generic way of testing extended artifacts; this is mostly useful for the cloud platforms
+5. For running the likes of metal/metal4k artifacts, there's not much difference from running `kola run` from the coreos-assembler
+6. `cd builds/latest/` (this will show your latest build information)
+7. `cosa list` (this will show you the most recent CoreOS builds that have been made and the artifacts that were created)
+8. In the case of the `testiso` command, you can determine what tests are running by looking for the pattern in the test name. It follows: `test-to-run.disk-type.networking.multipath.firmware`. For example, `iso-live-login.4k.uefi` attempts to install FCOS/RHCOS to a disk that uses a 4k sector size. If you don't see the 4k pattern, the `testiso` command will attempt to install FCOS/RHCOS to a non-4k disk (512b sector size).
+9. `cosa kola testiso iso-offline-install.mpath.uefi` (this is an example of testing the live ISO build with no internet access, using multipath and the uefi firmware)
+
+Example output:
+
+```
+kola -p qemu testiso --inst-insecure --output-dir tmp/kola
+Ignoring verification of signature on metal image
+Running test: iso-as-disk.bios
+PASS: iso-as-disk.bios (12.408s)
+Running test: iso-as-disk.uefi
+PASS: iso-as-disk.uefi (16.039s)
+Running test: iso-as-disk.uefi-secure
+PASS: iso-as-disk.uefi-secure (16.994s)
+```
+
+## Useful commands
+
+`cosa kola run 'name_of_test'` This is how to run a single test. It is useful for debugging specific tests in order to get a better understanding of the bug that's taking place. Once you run this command, the test's output will be added to the tmp directory.
+
+`cosa kola run basic` This will just run the basic tests
+
+`cosa kola run --parallel=3` This will run tests in parallel, 3 at a time.
+
+In order to see the logs for these tests, you must enter `tmp/kola/name_of_the_tests`; there you will find the logs (journal and console files, the Ignition config used, and so on).
+
+`cosa run` This launches the build you created (in this way you can access the image for troubleshooting). Also check the `-c` (console) option.
+
+`cosa run -i ignition_path` You can run it passing your Ignition config, or the Ignition config used in the test that failed, for troubleshooting reasons.
+
+`kola list --json | jq -r '.[] | [.Name,.Description]| @tsv'` This will list all test names and descriptions.
+
+## Run tests on cloud platforms
+`cosa kola run -p aws --aws-ami ami-0431766f2498820b8 --aws-region us-east-1 basic` This will run the basic tests on AWS using `ami-0431766f2498820b8` (fedora-coreos-37.20230227.20.2) with the default instance type `m5.large`. Add `--aws-type ` if you want to use a custom type. For how to create the credentials, refer to https://github.com/coreos/coreos-assembler/blob/main/docs/mantle/credentials.md#aws
+
+`kola run -p=gcp --gcp-image=projects/fedora-coreos-cloud/global/images/fedora-coreos-37-20230227-20-2-gcp-x86-64 --gcp-json-key=/data/gcp.json --gcp-project=fedora-coreos-testing basic` This will run the basic tests on GCP using the default machine type `n1-standard-1`.
+- `gcp-image` is in the format of `projects//global/images/`; to find related info, refer to https://builds.coreos.fedoraproject.org/browser?stream=testing-devel&arch=x86_64.
+- `gcp-json-key` uses a service account's JSON key for authentication; for how to create service account keys, refer to https://github.com/coreos/coreos-assembler/blob/main/docs/mantle/credentials.md#gcp.
+- `gcp-project` specifies the project to test in; if unset, it will use the same as ``.
\ No newline at end of file
diff --git a/docs/kola/adding-tests.md b/docs/kola/adding-tests.md
index 4853f4f7a9b3c4464cf73ae8b06cd79c14022134..b79074afee65522982d03b62eb859bbba4847d4a 100644
--- a/docs/kola/adding-tests.md
+++ b/docs/kola/adding-tests.md
@@ -11,7 +11,7 @@ nav_order: 1
## Quick Start
-1. Fork and clone the [`coreos-assembler` repository](https://github.com/coreos/coreos-aasembler/)
+1. Fork and clone the [`coreos-assembler` repository](https://github.com/coreos/coreos-assembler/)
2. Move into `mantle/kola/tests/` and look for the package your test would best fit
3. Edit the file and add your test(s), ensuring that you register your new test(s) in the packages `init()`
4. Commit, push, and PR your result
@@ -49,6 +49,7 @@ func init() {
ClusterSize: 1,
Name: `podman.noop`,
Distros: []string{"rhcos"},
+ Description: "Simple NOOP test for podman",
})
$ popd
@@ -84,6 +85,7 @@ Continuing with the look at the `podman` package we can see that `podman.base` i
ClusterSize: 1,
Name: `podman.base`,
Distros: []string{"rhcos"},
+ Description: "Verifies podman info and running with various options",
})
```
@@ -161,6 +163,7 @@ func init() {
Flags: []register.Flag{}, // See: https://godoc.org/github.com/coreos/coreos-assembler/mantle/kola/register#Flag
Distros: []string{"rhcos"},
FailFast: true,
+ Description: "Example test group",
})
}
diff --git a/docs/kola/external-tests.md b/docs/kola/external-tests.md
index f4d1701a9e4bcb08a91e445aaf889042a330ff21..8699325838411cad793cb754fd4009123c32cab9 100644
--- a/docs/kola/external-tests.md
+++ b/docs/kola/external-tests.md
@@ -188,7 +188,7 @@ Here's an example `kola.json`:
{
"architectures": "!s390x ppc64le",
"distros": "fcos",
- "platforms": "qemu-unpriv",
+ "platforms": "qemu",
"tags": "sometagname needs-internet skip-base-checks othertag",
"requiredTag": "special",
"additionalDisks": [ "5G" ],
@@ -199,7 +199,8 @@ Here's an example `kola.json`:
"appendFirstbootKernelArgs": "ip=bond0:dhcp bond=bond0:ens5,ens6:mode=active-backup,miimon=100"
"timeoutMin": 8,
"exclusive": true,
- "conflicts": ["ext.config.some-test", "podman.some-other-test"]
+ "conflicts": ["ext.config.some-test", "podman.some-other-test"],
+ "description": "test description"
}
```
@@ -221,32 +222,39 @@ with `kola run --tag`, but some tags have semantic meaning.
Tags with semantic meaning:
- `needs-internet`: Taken from the Autopkgtest (linked above). Currently only the `qemu` platform enforces this restriction.
- - `skip-base-checks`: Skip built-in checks for e.g. kernel warnings on the console.
+ - `platform-independent`: This test should pass or fail on all platforms (clouds and hardware architectures); it may be run less often.
+ - `skip-base-checks`: Skip built-in checks for e.g. kernel warnings on the console or systemd unit failures.
If a test has a `requiredTag`, it is run only if the required tag is specified.
In the example above, the test would only run if `--tag special` was provided.
The `additionalDisks` key has the same semantics as the `--add-disk` argument
-to `qemuexec`. It is currently only supported on `qemu-unpriv`.
+to `qemuexec`. It is currently only supported on `qemu`.
+
+The `injectContainer` boolean if set will cause the framework to inject
+the ostree base image container into the target system; the path can be
+found in the environment variable `KOLA_OSTREE_OCIARCHIVE`. This will be
+an `.ociarchive` file that can be e.g. loaded into the containers storage
+via `skopeo copy oci-archive:$KOLA_OSTREE_OCIARCHIVE containers-storage:localhost/os`.
The `minDisk` key takes a size in GB and ensures that an instance type with at
least the specified amount of primary disk space is used. On QEMU, this is
equivalent to the `--qemu-size` argument to `qemuexec`. This is currently only
-enforced on `qemu-unpriv` and `aws`.
+enforced on `qemu` and `aws`.
The `minMemory` key takes a size in MB and ensures that an instance type with
at least the specified amount of memory is used. On QEMU, this is equivalent to
the `--memory` argument to `qemuexec`. This is currently only enforced on
-`qemu-unpriv`.
+`qemu`.
The `additionalNics` key has the same semantics as the `--additional-nics` argument
-to `qemuexec`. It is currently only supported on `qemu-unpriv`.
+to `qemuexec`. It is currently only supported on `qemu`.
The `appendKernelArgs` key has the same semantics at the `--kargs` argument to
-`qemuexec`. It is currently only supported on `qemu-unpriv`.
+`qemuexec`. It is currently only supported on `qemu`.
The `appendFirstbootKernelArgs` key has the same semantics at the `--firstbootkargs`
-argument to `qemuexec`. It is currently only supported on `qemu-unpriv`.
+argument to `qemuexec`. It is currently only supported on `qemu`.
The `timeoutMin` key takes a positive integer and specifies a timeout for the test
in minutes. After the specified amount of time, the test will be interrupted.
@@ -269,12 +277,29 @@ inline per test, like this:
```sh
#!/bin/bash
set -xeuo pipefail
-# kola: { "architectures": "x86_64", "platforms": "aws gcp", "tags": "needs-internet" }
+# kola: { "architectures": "x86_64", "platforms": "aws gcp", "tags": "needs-internet", "description": "test" }
test code here
```
This metadata stanza must start with `# kola: ` and have a single line of JSON.
+Even more recently, you can write the test metadata as YAML inline; this is signified
+by using `## kola: `. The lines after it starting with `## ` will be parsed as metadata YAML.
+For example:
+
+```
+#!/bin/bash
+set -xeuo pipefail
+## kola:
+## architectures: x86_64
+## platforms: "aws gcp" # azure support is pending
+## tags: needs-internet
+## description: test description
+test code here
+```
+
+A notable advantage of YAML here is support for inline comments.
+
## Quick Start
1. In your project's upstream repository, create the `tests/kola` directory, if
@@ -298,14 +323,14 @@ $ cd my-project/tests/kola
$ $EDITOR basic/noop # Add the `noop` test
#!/bin/bash
set -xeuo pipefail
-# kola: { "architectures": "x86_64", "platforms": "qemu", "tags": "needs-internet" }
+# kola: { "architectures": "x86_64", "platforms": "qemu", "tags": "needs-internet", "description": "test" }
# Test: I'm a NOOP!
test 2 -gt 1
$ chmod a+x basic/noop # Make sure the test is executable
$ cosa kola run -p qemu --qemu-image path/to/qcow2 -E path/to/my-project/ 'ext.my-project.basic' # Run the test
=== RUN ext.my-project.basic
--- PASS: ext.my-project.basic (35.57s)
-PASS, output in _kola_temp/qemu-unpriv-2020-08-18-1815-2295199
+PASS, output in _kola_temp/qemu-2020-08-18-1815-2295199
```
## Fast build and iteration on your project's tests
diff --git a/docs/mantle/credentials.md b/docs/mantle/credentials.md
index 97ba01800714e59dc9eb560597ddb4afc73e3810..d4c184e330d9a3775580cac481047b426b8d0662 100644
--- a/docs/mantle/credentials.md
+++ b/docs/mantle/credentials.md
@@ -84,41 +84,51 @@ sudo emerge --ask awscli
## azure
-`azure` uses `~/.azure/azureProfile.json`. This can be created using the `az` [command](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli):
-```
-$ az login
-```
-It also requires that the environment variable `AZURE_AUTH_LOCATION` points to a JSON file (this can also be set via the `--azure-auth` parameter). The JSON file will require a service provider active directory account to be created.
+The [azure sdk for go](https://github.com/Azure/azure-sdk-for-go) does
+[not support any file based authentication schemes](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential).
+We'll use a JSON file generated by hand to pass authentication to our
+mantle code that will then use it to authenticate with azure services.
+
+First we must have a service principal and set of credentials to authenticate
+with. This can be created via the Azure CLI by the `az ad sp create-for-rbac`
+command. You must know your subscription ID in order to run this command. This
+can usually be picked up from `~/.azure/azureProfile.json` if you are logged
+in via the Azure CLI:
-Service provider accounts can be created via the `az` command (the output will contain an `appId` field which is used as the `clientId` variable in the `AZURE_AUTH_LOCATION` JSON):
```
-az ad sp create-for-rbac
+subscription='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+az ad sp create-for-rbac --name name --role Contributor --scopes "/subscriptions/${subscription}"
```
-The client secret can be created inside of the Azure portal when looking at the service provider account under the `Azure Active Directory` service on the `App registrations` tab.
+The output of this command is JSON formatted. Store the output of it in a file
+called `azureCreds.json`:
-You can find your subscriptionId & tenantId in the `~/.azure/azureProfile.json` via:
```
-cat ~/.azure/azureProfile.json | jq '{subscriptionId: .subscriptions[].id, tenantId: .subscriptions[].tenantId}'
+{
+ "appId": "11111111-2222-3333-4444-555555555555",
+ "displayName": "name",
+ "password": "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy",
+ "tenant": "66666666-7777-8888-9999-111111111111"
+}
```
-The JSON file exported to the variable `AZURE_AUTH_LOCATION` should be generated by hand and have the following contents:
+All we need now is to add the subscription ID information to the `azureCreds.json`
+so that the final file looks like:
+
```
{
- "clientId": "",
- "clientSecret": "",
- "subscriptionId": "",
- "tenantId": "",
- "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
- "resourceManagerEndpointUrl": "https://management.azure.com/",
- "activeDirectoryGraphResourceId": "https://graph.windows.net/",
- "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
- "galleryEndpointUrl": "https://gallery.azure.com/",
- "managementEndpointUrl": "https://management.core.windows.net/"
+ "appId": "11111111-2222-3333-4444-555555555555",
+ "displayName": "name",
+ "password": "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy",
+ "tenant": "66666666-7777-8888-9999-111111111111",
+ "subscription: "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
}
-
```
+This file can be placed at `$HOME/.azure/azureCreds.json` or the path
+can be placed in the `$AZURE_CREDENTIALS` environment variable or passed
+via the `--azure-credentials` option on the command line.
+
## do
`do` uses `~/.config/digitalocean.json`. This can be configured manually:
@@ -143,14 +153,9 @@ The JSON file exported to the variable `AZURE_AUTH_LOCATION` should be generated
}
```
-## gce
-
-`gce` uses the `~/.boto` file. When the `gce` platform is first used, it will print
-a link that can be used to log into your account with gce and get a verification code
-you can paste in. This will populate the `.boto` file.
+## gcp
-See [Google Cloud Platform's Documentation](https://cloud.google.com/storage/docs/boto-gsutil)
-for more information about the `.boto` file.
+To create a service account JSON key for authentication, refer to
+[creating service account keys](https://cloud.google.com/iam/docs/).
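+
+For example, a key for an existing service account can be created with the
+`gcloud` CLI (a sketch; the account and project names below are placeholders):
+
+```
+gcloud iam service-accounts keys create gcp.json \
+    --iam-account=my-sa@my-project.iam.gserviceaccount.com
+```
+
+The resulting key file can then be passed via the `--json-key` option or
+placed at `~/.config/gcp.json`.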
## openstack
@@ -186,13 +191,33 @@ for more information about the `.boto` file.
## qemu
-`qemu` is run locally and needs no credentials, but does need to be run as root.
-
-## qemu-unpriv
-
-`qemu-unpriv` is run locally and needs no credentials. It has a restricted set of functionality compared to the `qemu` platform, such as:
+`qemu` is run locally and needs no credentials. It has a few restrictions:
- No [Local cluster](platform/local/)
-- Usermode networking instead of namespaced networks
+- Usermode networking (no namespaced networks):
* Single node only, no machine to machine networking
- * Machines have internet access
+ * Machines have internet access by default
+
+## kubevirt
+
+`kubevirt` publishes a containerdisk which can be consumed by KubeVirt. To
+publish the containerdisk, credentials for the container registry must be
+provided in `~/.docker/config.json`.
+
+An example configuration may look like this:
+
+```
+{
+ "auths": {
+ "quay.io": {
+ "auth": "dXNlcjpwYXNzCg=="
+ }
+ }
+}
+```
+
+where `auth` is a base64-encoded HTTP Basic Auth token:
+
+```
+$ echo dXNlcjpwYXNzCg== | base64 -d
+user:pass
+```
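+
+Conversely, a value for `auth` can be generated with:
+
+```
+$ echo user:pass | base64
+dXNlcjpwYXNzCg==
+```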
diff --git a/docs/mantle/plume.md b/docs/mantle/plume.md
index 699bc8fbf77df05b704e2945473ab49dd30697e7..5c6b6a19f09a2dd908699b734a6b4ac94270506c 100644
--- a/docs/mantle/plume.md
+++ b/docs/mantle/plume.md
@@ -13,25 +13,10 @@ Fedora CoreOS and Fedora Cloud release utility. Releases are done in two
stages, each with their own command: pre-release and release. Both of these
commands are idempotent.
-## plume pre-release
-
-The pre-release command does as much of the release process as possible without making anything public.
-This includes uploading images to cloud providers (except those like gce which don't allow us to upload
-images without making them public).
-
## plume release
-Publish a new CoreOS/Cloud release. This makes the images uploaded by pre-release public and uploads
-images that pre-release could not. It copies the release artifacts to public storage buckets and updates
-the directory index.
-
-## plume index
-
-Generate and upload index.html objects to turn a Google Cloud Storage
-bucket into a publicly browsable file tree. Useful if you want something
-like Apache's directory index for your software download repository.
-Plume release handles this as well, so it does not need to be run as part of
-the release process.
+Publish a new CoreOS release. This makes uploaded images public and updates
+indexes.
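+
+With the `make-amis-public` and `update-release-index` subcommands, this
+boils down to (a sketch; the stream, version, and bucket prefix below are
+placeholders):
+
+```
+plume make-amis-public --stream=testing --version=38.20230322.2.0 \
+    --bucket-prefix=fcos-builds/prod/streams/testing
+plume update-release-index --stream=testing --version=38.20230322.2.0 \
+    --bucket-prefix=fcos-builds/prod/streams/testing
+```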
## Pre-flight
diff --git a/docs/working.md b/docs/working.md
index ede7175224cc293646cf6537d58326f3030bf6de..3af0fe03804d1e84b4b8722807e98d86d760237c 100644
--- a/docs/working.md
+++ b/docs/working.md
@@ -111,18 +111,43 @@ In the future, it's likely coreos-assembler will also support something
like `overrides/src` which could be a directory of symlinks to local
git repositories.
-## Using cosa run --bind-ro for even faster iteration
+## Using cosa build-fast
If you're working on e.g. the kernel or Ignition (things that go into the initramfs),
then you probably need a `cosa build` workflow (or `cosa buildinitramfs-fast`, see below).
-However, let's say you want to test a change to something much later in the boot process - e.g. `podman`. Rather
+However, let's say you want to test a change to something that runs purely in the real root,
+such as `rpm-ostree`, `podman`, or `console-login-helper-messages`. Rather
than doing a full image build each time, a fast way to test out changes is to use
-something like this:
+`cosa build-fast`.
+
+This command assumes you have a previous local coreos-assembler build. From
+the git checkout of the project whose binaries you want to inject:
+
+```
+$ export COSA_DIR=/srv/builds/fcos
+$ cosa build-fast
+$ cosa run
+```
+
+The `cosa build-fast` command will run `make` and inject the resulting binaries
+into a qcow2 overlay file, which will appear in your project working directory.
+The `cosa run` command similarly knows to look for these qcow2 files.
+
+This will not affect the "real" cosa build in `/srv/builds/fcos`, but will
+use it as a data source.
+
+## Using cosa run --bind-ro for even faster iteration
+
+This workflow can be used alongside `cosa build-fast`, or separately from it.
+If you invoke e.g.
```
$ cosa run --bind-ro ~/src/github/containers/podman,/run/workdir
```
+then the target VM will have the source directory mounted at `/run/workdir`, and
+you can copy binaries from your development environment directly into the VM.
+
If you are running cosa in a container, you will have to change your current
working directory to a parent directory common to both project directories and
use relative paths:
@@ -139,9 +164,6 @@ allowing you to directly execute binaries from there. You can also use e.g.
`rpm-ostree usroverlay` and then copy binaries from your host `/run/workdir` into
the VM's rootfs.
-(This currently only works on Fedora CoreOS which ships `9p`, not RHCOS. A future version
- will use https://virtio-fs.gitlab.io/ )
-
## Using host binaries
Another related trick is:
@@ -192,6 +214,29 @@ You'll need to
[manually configure autologin](https://docs.fedoraproject.org/en-US/fedora-coreos/tutorial-autologin/)
in the Ignition config, since kola won't be able to do it for you.
+## Performing an in-place OS update manually
+
+The output of coreos-assembler is conceptually two things:
+
+- an ostree container image
+- disk images (ISO, AWS AMI, qemu .qcow2, etc)
+
+In many cases, rather than booting a new disk image containing the new OS, you will
+want to explicitly test in-place upgrades. This uses an [ostree native container](https://fedoraproject.org/wiki/Changes/OstreeNativeContainer), which takes the form of an `.ociarchive` file generated
+by `cosa build container` (or by the default `cosa build`, which *also* generates a `qemu` disk image).
+
+You will need to make the container image available to your targeted system (VM or physical). One
+way to do this is to push the container to a public registry such as quay.io:
+
+`cosa push-container quay.io/exampleuser/fcos`
+
+Performing an in-place update from the `cosa build` output boils down to invoking commands of the form:
+
+```
+$ rpm-ostree rebase --experimental ostree-unverified-registry:quay.io/exampleuser/fcos
+$ systemctl reboot
+```
+
## Using different CA certificates
If you need access to CA certificates on your host (for example, when you need to access
diff --git a/internal/pkg/cosash/cosash.go b/internal/pkg/cosash/cosash.go
index b270909b0ea8aa132e225e1062a6d121aa7796c6..da1b237d067123e73dfedd28c11ef1ae0ff47b80 100644
--- a/internal/pkg/cosash/cosash.go
+++ b/internal/pkg/cosash/cosash.go
@@ -137,6 +137,11 @@ func (r *CosaSh) ProcessWithReply(buf string) (string, error) {
if _, err := io.WriteString(r.input, buf); err != nil {
return "", err
}
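+ // Ensure the input is newline-terminated so the shell executes it as a complete command.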
+ if !strings.HasSuffix(buf, "\n") {
+ if _, err := io.WriteString(r.input, "\n"); err != nil {
+ return "", err
+ }
+ }
select {
case reply := <-r.replychan:
diff --git a/mantle/auth/azure.go b/mantle/auth/azure.go
index c08b72e30a967d28c4f64d743e64261c397aec95..29bdb64cf489b3b7a22374f0209a915bd79e009f 100644
--- a/mantle/auth/azure.go
+++ b/mantle/auth/azure.go
@@ -1,3 +1,4 @@
+// Copyright 2023 Red Hat
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,139 +17,54 @@ package auth
import (
"encoding/json"
- "fmt"
"io"
"os"
"os/user"
"path/filepath"
-
- "golang.org/x/text/encoding/unicode"
- "golang.org/x/text/transform"
-
- "github.com/coreos/coreos-assembler/mantle/platform"
)
const (
- AzureAuthPath = ".azure/credentials.json"
- AzureProfilePath = ".azure/azureProfile.json"
+ AzureCredentialsPath = ".azure/azureCreds.json"
)
-// A version of the Options struct from platform/api/azure that only
-// contains the ASM values. Otherwise there's a cyclical depdendence
-// because platform/api/azure has to import auth to have access to
-// the ReadAzureProfile function.
-type Options struct {
- *platform.Options
-
- SubscriptionName string
- SubscriptionID string
-
- // Azure Storage API endpoint suffix. If unset, the Azure SDK default will be used.
- StorageEndpointSuffix string
-}
-
-type azureEnvironment struct {
- Name string `json:"name"`
- StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+type AzureCredentials struct {
+ ClientID string `json:"appId"`
+ ClientSecret string `json:"password"`
+ SubscriptionID string `json:"subscription"`
+ TenantID string `json:"tenant"`
}
-type azureSubscription struct {
- EnvironmentName string `json:"environmentName"`
- ID string `json:"id"`
- Name string `json:"name"`
-}
-
-// AzureProfile represents a parsed Azure Profile Configuration File.
-type AzureProfile struct {
- Environments []azureEnvironment `json:"environments"`
- Subscriptions []azureSubscription `json:"subscriptions"`
-}
-
-// AsOptions converts all subscriptions into a slice of Options.
-// If there is an environment with a name matching the subscription, that environment's storage endpoint will be copied to the options.
-func (ap *AzureProfile) asOptions() []Options {
- var o []Options
-
- for _, sub := range ap.Subscriptions {
- newo := Options{
- SubscriptionName: sub.Name,
- SubscriptionID: sub.ID,
- }
-
- // find the storage endpoint for the subscription
- for _, e := range ap.Environments {
- if e.Name == sub.EnvironmentName {
- newo.StorageEndpointSuffix = e.StorageEndpointSuffix
- break
- }
- }
-
- o = append(o, newo)
- }
-
- return o
-}
-
-// SubscriptionOptions returns the name subscription in the Azure profile as a Options struct.
-// If the subscription name is "", the first subscription is returned.
-// If there are no subscriptions or the named subscription is not found, SubscriptionOptions returns nil.
-func (ap *AzureProfile) SubscriptionOptions(name string) *Options {
- opts := ap.asOptions()
-
- if len(opts) == 0 {
- return nil
- }
-
- if name == "" {
- return &opts[0]
- } else {
- for _, o := range opts {
- if o.SubscriptionName == name {
- return &o
- }
- }
- }
-
- return nil
-}
-
-// ReadAzureProfile decodes an Azure Profile, as created by the Azure Cross-platform CLI.
+// ReadAzureCredentials picks up the credentials as described in the docs.
//
-// If path is empty, $HOME/.azure/azureProfile.json is read.
-func ReadAzureProfile(path string) (*AzureProfile, error) {
+// If path is empty, $AZURE_CREDENTIALS or $HOME/.azure/azureCreds.json is read.
+func ReadAzureCredentials(path string) (AzureCredentials, error) {
+ var azCreds AzureCredentials
if path == "" {
- user, err := user.Current()
- if err != nil {
- return nil, err
+ path = os.Getenv("AZURE_CREDENTIALS")
+ if path == "" {
+ user, err := user.Current()
+ if err != nil {
+ return azCreds, err
+ }
+ path = filepath.Join(user.HomeDir, AzureCredentialsPath)
}
-
- path = filepath.Join(user.HomeDir, AzureProfilePath)
}
- contents, err := decodeBOMFile(path)
+ f, err := os.Open(path)
if err != nil {
- return nil, err
- }
-
- var ap AzureProfile
- if err := json.Unmarshal(contents, &ap); err != nil {
- return nil, err
+ return azCreds, err
}
+ defer f.Close()
- if len(ap.Subscriptions) == 0 {
- return nil, fmt.Errorf("Azure profile %q contains no subscriptions", path)
+ content, err := io.ReadAll(f)
+ if err != nil {
+ return azCreds, err
}
- return &ap, nil
-}
-
-func decodeBOMFile(path string) ([]byte, error) {
- f, err := os.Open(path)
+ err = json.Unmarshal(content, &azCreds)
if err != nil {
- return nil, err
+ return azCreds, err
}
- defer f.Close()
- decoder := unicode.UTF8.NewDecoder()
- reader := transform.NewReader(f, unicode.BOMOverride(decoder))
- return io.ReadAll(reader)
+
+ return azCreds, nil
}
diff --git a/mantle/auth/google.go b/mantle/auth/google.go
index 767ec9c2fe01b566e0c497b13089242cd98f5fd8..025fc8660e3f704e039673624240a02c8c068ec0 100644
--- a/mantle/auth/google.go
+++ b/mantle/auth/google.go
@@ -26,7 +26,7 @@ import (
"golang.org/x/oauth2/google"
)
-const GCEConfigPath = ".config/gce.json"
+const GCPConfigPath = ".config/gcp.json"
var scopes = []string{
"https://www.googleapis.com/auth/devstorage.full_control",
@@ -34,7 +34,7 @@ var scopes = []string{
}
// GoogleServiceClient fetchs a token from Google Compute Engine's
-// metadata service. This should be used on GCE vms. The Default account
+// metadata service. This should be used on GCP vms. The Default account
// is used.
func GoogleServiceClient() *http.Client {
return &http.Client{
@@ -56,7 +56,7 @@ func GoogleClientFromKeyFile(path string, scope ...string) (*http.Client, error)
if err != nil {
return nil, err
}
- path = filepath.Join(user.HomeDir, GCEConfigPath)
+ path = filepath.Join(user.HomeDir, GCPConfigPath)
}
b, err := os.ReadFile(path)
if err != nil {
diff --git a/mantle/cmd/ore/aws/upload.go b/mantle/cmd/ore/aws/upload.go
index 880180926d6ec3414a442ef95ee2050b110ed8c2..9c3c8c78cf32c6c678444bd4f6e30c31e6fa3192 100644
--- a/mantle/cmd/ore/aws/upload.go
+++ b/mantle/cmd/ore/aws/upload.go
@@ -64,6 +64,9 @@ After a successful run, the final line of output will be a line of JSON describi
uploadGrantUsers []string
uploadGrantUsersSnapshot []string
uploadTags []string
+ uploadIMDSv2Only bool
+ uploadVolumeType string
+ uploadX86BootMode string
)
func init() {
@@ -85,6 +88,9 @@ func init() {
cmdUpload.Flags().StringSliceVar(&uploadGrantUsers, "grant-user", []string{}, "grant launch permission to this AWS user ID")
cmdUpload.Flags().StringSliceVar(&uploadGrantUsersSnapshot, "grant-user-snapshot", []string{}, "grant snapshot volume permission to this AWS user ID")
cmdUpload.Flags().StringSliceVar(&uploadTags, "tags", []string{}, "list of key=value tags to attach to the AMI")
+ cmdUpload.Flags().BoolVar(&uploadIMDSv2Only, "imdsv2-only", false, "enable IMDSv2-only support")
+ cmdUpload.Flags().StringVar(&uploadVolumeType, "volume-type", "gp3", "EBS volume type (gp3, gp2, io1, st1, sc1, standard, etc.)")
+ cmdUpload.Flags().StringVar(&uploadX86BootMode, "x86-boot-mode", "uefi-preferred", "Set boot mode (uefi-preferred, uefi)")
}
func defaultBucketNameForRegion(region string) string {
@@ -243,7 +249,7 @@ func runUpload(cmd *cobra.Command, args []string) error {
}
// create AMIs and grant permissions
- amiID, err := API.CreateHVMImage(sourceSnapshot, uploadDiskSizeGiB, uploadAMIName, uploadAMIDescription, uploadImageArchitecture)
+ amiID, err := API.CreateHVMImage(sourceSnapshot, uploadDiskSizeGiB, uploadAMIName, uploadAMIDescription, uploadImageArchitecture, uploadVolumeType, uploadIMDSv2Only, uploadX86BootMode)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to create HVM image: %v\n", err)
os.Exit(1)
diff --git a/mantle/cmd/ore/azure/azure.go b/mantle/cmd/ore/azure/azure.go
index 2013a5f62d853a374cd110fa82790d25c1e7e7a3..63d1dd6b45745cfb0389dedc00e26164360930e3 100644
--- a/mantle/cmd/ore/azure/azure.go
+++ b/mantle/cmd/ore/azure/azure.go
@@ -1,3 +1,4 @@
+// Copyright 2023 Red Hat
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -31,10 +32,8 @@ var (
Short: "azure image and vm utilities",
}
- azureProfile string
- azureAuth string
- azureSubscription string
- azureLocation string
+ azureCredentials string
+ azureLocation string
api *azure.API
)
@@ -43,9 +42,7 @@ func init() {
cli.WrapPreRun(Azure, preauth)
sv := Azure.PersistentFlags().StringVar
- sv(&azureProfile, "azure-profile", "", "Azure Profile json file")
- sv(&azureAuth, "azure-auth", "", "Azure auth location (default \"~/"+auth.AzureAuthPath+"\")")
- sv(&azureSubscription, "azure-subscription", "", "Azure subscription name. If unset, the first is used.")
+ sv(&azureCredentials, "azure-credentials", "", "Azure credentials file location (default \"~/"+auth.AzureCredentialsPath+"\")")
sv(&azureLocation, "azure-location", "westus", "Azure location (default \"westus\")")
}
@@ -53,10 +50,8 @@ func preauth(cmd *cobra.Command, args []string) error {
plog.Printf("Creating Azure API...")
a, err := azure.New(&azure.Options{
- AzureProfile: azureProfile,
- AzureAuthLocation: azureAuth,
- AzureSubscription: azureSubscription,
- Location: azureLocation,
+ AzureCredentials: azureCredentials,
+ Location: azureLocation,
})
if err != nil {
plog.Fatalf("Failed to create Azure API: %v", err)
diff --git a/mantle/cmd/ore/azure/delete-blob.go b/mantle/cmd/ore/azure/delete-blob.go
index ae8e391fdffa4d35ebf6c2efb30a74b031f490c2..7553d16ae3e3be6594a6a59908f3a8e01650945f 100644
--- a/mantle/cmd/ore/azure/delete-blob.go
+++ b/mantle/cmd/ore/azure/delete-blob.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Red Hat
+// Copyright 2023 Red Hat
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -52,13 +52,13 @@ func runDeleteBlob(cmd *cobra.Command, args []string) {
if err != nil {
plog.Fatalf("Fetching storage service keys failed: %v", err)
}
-
- if kr.Keys == nil || len(*kr.Keys) == 0 {
+ if kr.Keys == nil || len(kr.Keys) == 0 {
plog.Fatalf("No storage service keys found")
}
+ k := kr.Keys
+ key := k[0].Value
- k := (*kr.Keys)[0]
- exists, err := api.BlobExists(dbo.storageacct, *k.Value, dbo.container, dbo.blob)
+ exists, err := api.PageBlobExists(dbo.storageacct, *key, dbo.container, dbo.blob)
if err != nil {
plog.Fatalf("Checking if blob exists failed: %v", err)
}
@@ -67,7 +67,7 @@ func runDeleteBlob(cmd *cobra.Command, args []string) {
plog.Infof("Blob doesn't exist. No need to delete.")
} else {
plog.Infof("Deleting blob.")
- err = api.DeleteBlob(dbo.storageacct, *k.Value, dbo.container, dbo.blob)
+ err = api.DeletePageBlob(dbo.storageacct, *key, dbo.container, dbo.blob)
if err != nil {
plog.Fatalf("Deleting blob failed: %v", err)
}
diff --git a/mantle/cmd/ore/azure/upload-blob.go b/mantle/cmd/ore/azure/upload-blob.go
index 1d211a41af8e80f007e62e84e3f595cdb8e60121..cdaca61d66c702525a6130c59efb442228bf3402 100644
--- a/mantle/cmd/ore/azure/upload-blob.go
+++ b/mantle/cmd/ore/azure/upload-blob.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Red Hat
+// Copyright 2023 Red Hat
// Copyright 2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,7 +21,6 @@ import (
"os"
"strings"
- "github.com/Microsoft/azure-vhd-utils/vhdcore/validator"
"github.com/spf13/cobra"
)
@@ -81,29 +80,28 @@ func runUploadBlob(cmd *cobra.Command, args []string) {
if !strings.HasSuffix(strings.ToLower(ubo.vhd), ".vhd") {
plog.Fatalf("Image should end with .vhd")
}
-
- if err := validator.ValidateVhd(ubo.vhd); err != nil {
- plog.Fatal(err)
- }
-
- if err := validator.ValidateVhdSize(ubo.vhd); err != nil {
- plog.Fatal(err)
- }
}
kr, err := api.GetStorageServiceKeys(ubo.storageacct, resourceGroup)
if err != nil {
plog.Fatalf("Fetching storage service keys failed: %v", err)
}
-
- if kr.Keys == nil || len(*kr.Keys) == 0 {
+ if kr.Keys == nil || len(kr.Keys) == 0 {
plog.Fatalf("No storage service keys found")
}
+ k := kr.Keys
+ key := k[0].Value
- //only use the first service key to avoid uploading twice
- //see https://github.com/coreos/coreos-assembler/pull/1849
- k := (*kr.Keys)[0]
- if err := api.UploadBlob(ubo.storageacct, *k.Value, ubo.vhd, ubo.container, ubo.blob, ubo.overwrite); err != nil {
+ exists, err := api.PageBlobExists(ubo.storageacct, *key, ubo.container, ubo.blob)
+ if err != nil {
+ plog.Fatalf("Detecting if blob exists failed: %v", err)
+ }
+ if exists && !ubo.overwrite {
+ plog.Fatalf("The blob exists. Pass --overwrite to force upload.")
+ }
+
+ err = api.UploadPageBlob(ubo.storageacct, *key, ubo.vhd, ubo.container, ubo.blob)
+ if err != nil {
plog.Fatalf("Uploading blob failed: %v", err)
}
diff --git a/mantle/cmd/ore/gcloud/create-image.go b/mantle/cmd/ore/gcloud/create-image.go
index 5b3cc9bac0e777e31d187ab1d564f495a22da33b..b0001fffbdab880da066378f596a757a720d4d81 100644
--- a/mantle/cmd/ore/gcloud/create-image.go
+++ b/mantle/cmd/ore/gcloud/create-image.go
@@ -31,11 +31,12 @@ import (
var (
cmdCreateImage = &cobra.Command{
Use: "create-image",
- Short: "Create GCE image",
- Long: "Create GCE image from an existing file in Google Storage",
+ Short: "Create GCP image",
+ Long: "Create GCP image from an existing file in Google Storage",
Run: runCreateImage,
}
+ createImageArch string
createImageFamily string
createImageBoard string
createImageVersion string
@@ -46,8 +47,10 @@ var (
func init() {
user := os.Getenv("USER")
+ cmdCreateImage.Flags().StringVar(&createImageArch, "arch",
+ "", "The architecture of the image")
cmdCreateImage.Flags().StringVar(&createImageFamily, "family",
- user, "GCE image group and name prefix")
+ user, "GCP image group and name prefix")
cmdCreateImage.Flags().StringVar(&createImageBoard, "board",
"amd64-usr", "OS board name")
cmdCreateImage.Flags().StringVar(&createImageVersion, "version",
@@ -56,10 +59,10 @@ func init() {
"gs://users.developer.core-os.net/"+user+"/boards",
"Storage URL prefix")
cmdCreateImage.Flags().StringVar(&createImageName, "source-name",
- "coreos_production_gce.tar.gz",
+ "coreos_production_gcp.tar.gz",
"Storage image name")
cmdCreateImage.Flags().BoolVar(&createImageForce, "force",
- false, "overwrite existing GCE images without prompt")
+ false, "overwrite existing GCP images without prompt")
GCloud.AddCommand(cmdCreateImage)
}
@@ -91,7 +94,7 @@ func runCreateImage(cmd *cobra.Command, args []string) {
bucket := gsURL.Host
imageNameGS := strings.TrimPrefix(path.Join(gsURL.Path,
createImageBoard, createImageVersion, createImageName), "/")
- imageNameGCE := gceSanitize(createImageFamily + "-" + createImageVersion)
+ imageNameGCP := gcpSanitize(createImageFamily + "-" + createImageVersion)
ctx := context.Background()
storageAPI, err := storage.NewService(ctx)
@@ -111,19 +114,20 @@ func runCreateImage(cmd *cobra.Command, args []string) {
os.Exit(1)
}
- fmt.Printf("Creating image in GCE: %v...\n", imageNameGCE)
+ fmt.Printf("Creating image in GCP: %v...\n", imageNameGCP)
- // create image on gce
+ // create image on gcp
storageSrc := fmt.Sprintf("https://storage.googleapis.com/%v/%v", bucket, imageNameGS)
_, pending, err := api.CreateImage(&gcloud.ImageSpec{
- Name: imageNameGCE,
- SourceImage: storageSrc,
+ Architecture: createImageArch,
+ Name: imageNameGCP,
+ SourceImage: storageSrc,
}, createImageForce)
if err == nil {
err = pending.Wait()
}
if err != nil {
- fmt.Fprintf(os.Stderr, "Creating GCE image failed: %v\n", err)
+ fmt.Fprintf(os.Stderr, "Creating GCP image failed: %v\n", err)
os.Exit(1)
}
}
diff --git a/mantle/cmd/ore/gcloud/delete-images.go b/mantle/cmd/ore/gcloud/delete-images.go
index a69ee0a70794383a1ff8205e00981c41e123c0a7..fe2f1773c57977bd5d2470969370907c0b421e30 100644
--- a/mantle/cmd/ore/gcloud/delete-images.go
+++ b/mantle/cmd/ore/gcloud/delete-images.go
@@ -26,7 +26,7 @@ import (
var (
cmdDeleteImage = &cobra.Command{
Use: "delete-images ...",
- Short: "Delete GCE images",
+ Short: "Delete GCP images",
Run: runDeleteImage,
}
)
diff --git a/mantle/cmd/ore/gcloud/destroy.go b/mantle/cmd/ore/gcloud/destroy.go
index 39a327f8105065d727b43c2fb202ec50364170f5..ea87467c3eee9950338c55553ebec3360900dccf 100644
--- a/mantle/cmd/ore/gcloud/destroy.go
+++ b/mantle/cmd/ore/gcloud/destroy.go
@@ -24,8 +24,8 @@ import (
var (
cmdDestroy = &cobra.Command{
Use: "destroy-instances --prefix= ",
- Short: "destroy cluster on GCE",
- Long: "Destroy GCE instances based on name prefix.",
+ Short: "destroy cluster on GCP",
+ Long: "Destroy GCP instances based on name prefix.",
Run: runDestroy,
}
)
diff --git a/mantle/cmd/ore/gcloud/gc.go b/mantle/cmd/ore/gcloud/gc.go
index 75b7517594cb4de47784e93669116e824e8454c0..4ac82fc7cd9d0875f7f346b0a36fe8beb13c39b7 100644
--- a/mantle/cmd/ore/gcloud/gc.go
+++ b/mantle/cmd/ore/gcloud/gc.go
@@ -25,7 +25,7 @@ import (
var (
cmdGC = &cobra.Command{
Use: "gc",
- Short: "GC resources in GCE",
+ Short: "GC resources in GCP",
Long: `Delete instances created over the given duration ago.`,
RunE: runGC,
diff --git a/mantle/cmd/ore/gcloud/gcloud.go b/mantle/cmd/ore/gcloud/gcloud.go
index 09428dab728a6a032b8a2b8d6eece3faf0e1343f..5ae27aa543d60ba2338d9f734d02f2f4d5928013 100644
--- a/mantle/cmd/ore/gcloud/gcloud.go
+++ b/mantle/cmd/ore/gcloud/gcloud.go
@@ -24,7 +24,7 @@ import (
)
var (
- plog = capnslog.NewPackageLogger("github.com/coreos/coreos-assembler/mantle", "ore/gce")
+ plog = capnslog.NewPackageLogger("github.com/coreos/coreos-assembler/mantle", "ore/gcp")
GCloud = &cobra.Command{
Use: "gcloud [command]",
@@ -47,7 +47,7 @@ func init() {
sv(&opts.BaseName, "basename", "kola", "instance name prefix")
sv(&opts.Network, "network", "default", "network name")
sv(&opts.JSONKeyFile, "json-key", "", "use a service account's JSON key for authentication")
- GCloud.PersistentFlags().BoolVar(&opts.ServiceAuth, "service-auth", false, "use non-interactive auth when running within GCE")
+ GCloud.PersistentFlags().BoolVar(&opts.ServiceAuth, "service-auth", false, "use non-interactive auth when running within GCP")
cli.WrapPreRun(GCloud, preauth)
}
diff --git a/mantle/cmd/ore/gcloud/image.go b/mantle/cmd/ore/gcloud/image.go
index e073b4e1084c1101bc8b75f6a515aa855c83e25d..11fa49b31a3e3d4eb4b9341212fe78701669b939 100644
--- a/mantle/cmd/ore/gcloud/image.go
+++ b/mantle/cmd/ore/gcloud/image.go
@@ -25,7 +25,7 @@ import (
var (
cmdImage = &cobra.Command{
Use: "list-images --prefix=",
- Short: "List images in GCE",
+ Short: "List images in GCP",
Run: runImage,
}
diff --git a/mantle/cmd/ore/gcloud/list.go b/mantle/cmd/ore/gcloud/list.go
index 44a0fbe532d21ca9fa0aa0ae478a6a901289cad8..9a14ef286861e54fb3d02376b589fc9e70a34fae 100644
--- a/mantle/cmd/ore/gcloud/list.go
+++ b/mantle/cmd/ore/gcloud/list.go
@@ -26,7 +26,7 @@ import (
var (
cmdList = &cobra.Command{
Use: "list-instances --prefix=",
- Short: "List instances on GCE",
+ Short: "List instances on GCP",
Run: runList,
}
)
diff --git a/mantle/cmd/ore/gcloud/upload.go b/mantle/cmd/ore/gcloud/upload.go
index f20ad604c11bbdea13688f064e84635dc8a58c55..9c2d0a809e1334864d1c0c0c5d15d5f591e4fc86 100644
--- a/mantle/cmd/ore/gcloud/upload.go
+++ b/mantle/cmd/ore/gcloud/upload.go
@@ -33,7 +33,7 @@ var (
cmdUpload = &cobra.Command{
Use: "upload",
Short: "Upload os image",
- Long: "Upload os image to Google Storage bucket and create image in GCE. Intended for use in SDK.",
+ Long: "Upload os image to Google Storage bucket and create image in GCP. Intended for use in SDK.",
Run: runUpload,
}
@@ -42,6 +42,7 @@ var (
uploadFile string
uploadForce bool
uploadWriteUrl string
+ uploadImageArch string
uploadImageFamily string
uploadImageDescription string
uploadCreateImage bool
@@ -62,9 +63,9 @@ func init() {
if err := cmdUpload.MarkFlagRequired("file"); err != nil {
panic(err)
}
- cmdUpload.Flags().BoolVar(&uploadForce, "force", false, "overwrite existing GS and GCE images without prompt")
+ cmdUpload.Flags().BoolVar(&uploadForce, "force", false, "overwrite existing GS and GCP images without prompt")
cmdUpload.Flags().StringVar(&uploadWriteUrl, "write-url", "", "output the uploaded URL to the named file")
- cmdUpload.Flags().StringVar(&uploadImageFamily, "family", "", "GCP image family to attach image to")
+ cmdUpload.Flags().StringVar(&uploadImageArch, "arch", "", "The architecture of the image")
+ cmdUpload.Flags().StringVar(&uploadImageFamily, "family", "", "GCP image family to attach image to")
cmdUpload.Flags().StringVar(&uploadImageDescription, "description", "", "The description that should be attached to the image")
cmdUpload.Flags().BoolVar(&uploadCreateImage, "create-image", true, "Create an image in GCP after uploading")
cmdUpload.Flags().BoolVar(&uploadPublic, "public", false, "Set public ACLs on image")
@@ -101,8 +102,8 @@ func runUpload(cmd *cobra.Command, args []string) {
uploadBucket = gsURL.Host
imageNameGS := strings.TrimPrefix(gsURL.Path+"/"+uploadImageName, "/") + ".tar.gz"
- // Sanitize the image name for GCE
- imageNameGCE := gceSanitize(uploadImageName)
+ // Sanitize the image name for GCP
+ imageNameGCP := gcpSanitize(uploadImageName)
ctx := context.Background()
storageAPI, err := storage.NewService(ctx, option.WithHTTPClient(api.Client()))
@@ -143,12 +144,13 @@ func runUpload(cmd *cobra.Command, args []string) {
imageStorageURL := fmt.Sprintf("https://storage.googleapis.com/%v/%v", uploadBucket, imageNameGS)
if uploadCreateImage {
- fmt.Printf("Creating image in GCE: %v...\n", imageNameGCE)
+ fmt.Printf("Creating image in GCP: %v...\n", imageNameGCP)
spec := &gcloud.ImageSpec{
- Name: imageNameGCE,
- Family: uploadImageFamily,
- SourceImage: imageStorageURL,
- Description: uploadImageDescription,
+ Architecture: uploadImageArch,
+ Name: imageNameGCP,
+ Family: uploadImageFamily,
+ SourceImage: imageStorageURL,
+ Description: uploadImageDescription,
}
if len(uploadImageLicenses) > 0 {
spec.Licenses = uploadImageLicenses
@@ -161,7 +163,7 @@ func runUpload(cmd *cobra.Command, args []string) {
// if image already exists ask to delete and try again
if err != nil && strings.HasSuffix(err.Error(), "alreadyExists") {
var ans string
- fmt.Printf("Image %v already exists on GCE. Overwrite? (y/n):", imageNameGCE)
+ fmt.Printf("Image %v already exists on GCP. Overwrite? (y/n):", imageNameGCP)
if _, err = fmt.Scan(&ans); err != nil {
fmt.Fprintf(os.Stderr, "Scanning overwrite input: %v", err)
os.Exit(1)
@@ -174,25 +176,25 @@ func runUpload(cmd *cobra.Command, args []string) {
err = pending.Wait()
}
if err != nil {
- fmt.Fprintf(os.Stderr, "Creating GCE image failed: %v\n", err)
+ fmt.Fprintf(os.Stderr, "Creating GCP image failed: %v\n", err)
os.Exit(1)
}
- fmt.Printf("Image %v sucessfully created in GCE\n", imageNameGCE)
+ fmt.Printf("Image %v successfully created in GCP\n", imageNameGCP)
default:
- fmt.Println("Skipped GCE image creation")
+ fmt.Println("Skipped GCP image creation")
}
}
if err != nil {
- fmt.Fprintf(os.Stderr, "Creating GCE image failed: %v\n", err)
+ fmt.Fprintf(os.Stderr, "Creating GCP image failed: %v\n", err)
os.Exit(1)
}
// If requested, set the image ACL to public
if uploadPublic {
- fmt.Printf("Setting image to have public access: %v\n", imageNameGCE)
- err = api.SetImagePublic(imageNameGCE)
+ fmt.Printf("Setting image to have public access: %v\n", imageNameGCP)
+ err = api.SetImagePublic(imageNameGCP)
if err != nil {
- fmt.Fprintf(os.Stderr, "Marking GCE image with public ACLs failed: %v\n", err)
+ fmt.Fprintf(os.Stderr, "Marking GCP image with public ACLs failed: %v\n", err)
os.Exit(1)
}
}
@@ -208,11 +210,11 @@ func runUpload(cmd *cobra.Command, args []string) {
}
-// Converts an image name from Google Storage to an equivalent GCE image
-// name. NOTE: Not a fully generlized sanitizer for GCE. Designed for
+// Converts an image name from Google Storage to an equivalent GCP image
+// name. NOTE: Not a fully generalized sanitizer for GCP. Designed for
// the default version.txt name (ex: 633.1.0+2015-03-31-1538). See:
// https://godoc.org/google.golang.org/api/compute/v1#Image
-func gceSanitize(name string) string {
+func gcpSanitize(name string) string {
if name == "" {
return name
}
diff --git a/mantle/cmd/plume/cosa2stream.go b/mantle/cmd/plume/cosa2stream.go
index ed3b1c7b75c4681c3cc555dd6f1a1855b63a3092..8e35ea62d2d63ff4d5269b76c7d0813af2042358 100644
--- a/mantle/cmd/plume/cosa2stream.go
+++ b/mantle/cmd/plume/cosa2stream.go
@@ -33,7 +33,7 @@ import (
const (
// This will hopefully migrate to mirror.openshift.com, see https://github.com/openshift/os/issues/477
- rhcosCosaEndpoint = "https://rhcos.mirror.openshift.com/art/storage/releases"
+ rhcosCosaEndpoint = "https://rhcos.mirror.openshift.com/art/storage/prod/streams"
)
var (
@@ -105,7 +105,6 @@ func runCosaBuildToStream(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}
- var archStreamName = streamName
if !strings.HasPrefix(arg, "https://") {
if distro != "rhcos" {
return errors.New("Arguments must be https:// URLs (or with --distro rhcos, ARCH=VERSION)")
@@ -116,24 +115,24 @@ func runCosaBuildToStream(cmd *cobra.Command, args []string) error {
}
arch := parts[0]
ver := parts[1]
- // Convert e.g. 48.82. to rhcos-4.8
+ // Convert e.g. 48.82. to 4.8
verSplit := strings.Split(ver, ".")
- archStreamName = fmt.Sprintf("rhcos-%s.%s", verSplit[0][0:1], verSplit[0][1:])
- if arch != "x86_64" {
- archStreamName += "-" + arch
+ if streamName == "" {
+ streamName = fmt.Sprintf("%s.%s", verSplit[0][0:1], verSplit[0][1:])
}
+
endpoint := rhcosCosaEndpoint
if streamBaseURL != "" {
endpoint = streamBaseURL
}
- base := fmt.Sprintf("%s/%s", endpoint, archStreamName)
- u := fmt.Sprintf("%s/%s/%s/meta.json", base, ver, arch)
+ base := fmt.Sprintf("%s/%s", endpoint, streamName)
+ u := fmt.Sprintf("%s/builds/%s/%s/meta.json", base, ver, arch)
arg = u
childArgs = append(childArgs, "--stream-baseurl="+endpoint)
}
cosaArgs := append([]string{}, childArgs...)
cosaArgs = append(cosaArgs, "--url="+arg)
- cosaArgs = append(cosaArgs, "--stream-name="+archStreamName)
+ cosaArgs = append(cosaArgs, "--stream-name="+streamName)
cosaArgs = append(cosaArgs, "--output="+releaseTmpf.Name())
c := exec.Command("cosa", cosaArgs...)
c.Stderr = os.Stderr
diff --git a/mantle/cmd/plume/release.go b/mantle/cmd/plume/release.go
index 9a866c761b90c91d933aa3c9fb7c7b8600201388..17d966b77cb93130bc52007735c3fca90fffab4e 100644
--- a/mantle/cmd/plume/release.go
+++ b/mantle/cmd/plume/release.go
@@ -20,7 +20,9 @@ import (
"fmt"
"io"
"net/url"
+ "os"
"path/filepath"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -31,45 +33,49 @@ import (
var (
awsCredentialsFile string
- selectedDistro string
- specBucket string
- specPolicy string
specProfile string
specRegion string
specStream string
specVersion string
- cmdRelease = &cobra.Command{
- Use: "release [options]",
- Short: "Publish a new CoreOS release.",
- Run: runRelease,
- Long: `Publish a new CoreOS release.`,
+ specBucketPrefix string
+
+ cmdMakeAmisPublic = &cobra.Command{
+ Use: "make-amis-public [options]",
+ Short: "Make the AMIs of a CoreOS release public.",
+ Run: runMakeAmisPublic,
+ Long: "Make the AMIs of a CoreOS release public.",
+ }
+
+ cmdUpdateReleaseIndex = &cobra.Command{
+ Use: "update-release-index [options]",
+ Short: "Update a stream's release index for a CoreOS release.",
+ Run: runUpdateReleaseIndex,
+ Long: "Update a stream's release index for a CoreOS release.",
}
)
func init() {
- cmdRelease.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file")
- cmdRelease.Flags().StringVar(&selectedDistro, "distro", "fcos", "system to release")
- cmdRelease.Flags().StringVar(&specBucket, "bucket", "fcos-builds", "S3 bucket")
- cmdRelease.Flags().StringVar(&specPolicy, "policy", "public-read", "Canned ACL policy")
- cmdRelease.Flags().StringVar(&specProfile, "profile", "default", "AWS profile")
- cmdRelease.Flags().StringVar(&specRegion, "region", "us-east-1", "S3 bucket region")
- cmdRelease.Flags().StringVarP(&specStream, "stream", "S", "testing", "target stream")
- cmdRelease.Flags().StringVarP(&specVersion, "version", "V", "", "release version")
- root.AddCommand(cmdRelease)
-}
+ cmdMakeAmisPublic.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file")
+ cmdMakeAmisPublic.Flags().StringVar(&specBucketPrefix, "bucket-prefix", "", "S3 bucket and prefix")
+ cmdMakeAmisPublic.Flags().StringVar(&specProfile, "profile", "default", "AWS profile")
+ cmdMakeAmisPublic.Flags().StringVar(&specRegion, "region", "us-east-1", "S3 bucket region")
+ cmdMakeAmisPublic.Flags().StringVarP(&specStream, "stream", "", "", "target stream")
+ cmdMakeAmisPublic.Flags().StringVarP(&specVersion, "version", "", "", "release version")
+ root.AddCommand(cmdMakeAmisPublic)
+
+ cmdUpdateReleaseIndex.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file")
+ cmdUpdateReleaseIndex.Flags().StringVar(&specBucketPrefix, "bucket-prefix", "", "S3 bucket and prefix")
+ cmdUpdateReleaseIndex.Flags().StringVar(&specProfile, "profile", "default", "AWS profile")
+ cmdUpdateReleaseIndex.Flags().StringVar(&specRegion, "region", "us-east-1", "S3 bucket region")
+ cmdUpdateReleaseIndex.Flags().StringVarP(&specStream, "stream", "", "", "target stream")
+ cmdUpdateReleaseIndex.Flags().StringVarP(&specVersion, "version", "", "", "release version")
+ root.AddCommand(cmdUpdateReleaseIndex)
-func runRelease(cmd *cobra.Command, args []string) {
- switch selectedDistro {
- case "fcos":
- runFcosRelease(cmd, args)
- default:
- plog.Fatalf("Unknown distro %q:", selectedDistro)
- }
}
-func runFcosRelease(cmd *cobra.Command, args []string) {
+func validateArgs(args []string) {
if len(args) > 0 {
plog.Fatal("No args accepted")
}
@@ -79,18 +85,32 @@ func runFcosRelease(cmd *cobra.Command, args []string) {
if specStream == "" {
plog.Fatal("--stream is required")
}
- if specBucket == "" {
- plog.Fatal("--bucket is required")
+ if specBucketPrefix == "" {
+ plog.Fatal("--bucket-prefix is required")
}
if specRegion == "" {
plog.Fatal("--region is required")
}
+}
+
+func runMakeAmisPublic(cmd *cobra.Command, args []string) {
+ validateArgs(args)
+ api := getAWSApi()
+ rel := getReleaseMetadata(api)
+ incomplete := makeReleaseAMIsPublic(rel)
+ if incomplete {
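+ // a distinct exit code signals that some, but not all, AMIs could not be made public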
+ os.Exit(77)
+ }
+}
- doS3()
- modifyReleaseMetadataIndex()
+func runUpdateReleaseIndex(cmd *cobra.Command, args []string) {
+ validateArgs(args)
+ api := getAWSApi()
+ rel := getReleaseMetadata(api)
+ modifyReleaseMetadataIndex(api, rel)
}
-func doS3() {
+func getAWSApi() *aws.API {
api, err := aws.New(&aws.Options{
CredentialsFile: awsCredentialsFile,
Profile: specProfile,
@@ -100,33 +120,99 @@ func doS3() {
plog.Fatalf("creating aws client: %v", err)
}
- // Assumes the bucket layout defined inside of
- // https://github.com/coreos/fedora-coreos-tracker/issues/189
- err = api.UpdateBucketObjectsACL(specBucket, filepath.Join("prod", "streams", specStream, "builds", specVersion), specPolicy)
- if err != nil {
- plog.Fatalf("updating object ACLs: %v", err)
+ return api
+}
+
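+// getBucketAndStreamPrefix splits the --bucket-prefix value
+// (e.g. "fcos-builds/prod/streams/testing") into an S3 bucket and key prefix.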
+func getBucketAndStreamPrefix() (string, string) {
+ split := strings.SplitN(specBucketPrefix, "/", 2)
+ if len(split) != 2 {
+ plog.Fatalf("can't split %q into bucket and prefix", specBucketPrefix)
}
+ return split[0], split[1]
}
-func modifyReleaseMetadataIndex() {
- api, err := aws.New(&aws.Options{
- CredentialsFile: awsCredentialsFile,
- Profile: specProfile,
- Region: specRegion,
- })
+func getReleaseMetadata(api *aws.API) release.Release {
+ bucket, prefix := getBucketAndStreamPrefix()
+ releasePath := filepath.Join(prefix, "builds", specVersion, "release.json")
+ releaseFile, err := api.DownloadFile(bucket, releasePath)
if err != nil {
- plog.Fatalf("creating aws client: %v", err)
+ plog.Fatalf("downloading release metadata at %s: %v", releasePath, err)
+ }
+ defer releaseFile.Close()
+
+ releaseData, err := io.ReadAll(releaseFile)
+ if err != nil {
+ plog.Fatalf("reading release metadata: %v", err)
+ }
+
+ var rel release.Release
+ err = json.Unmarshal(releaseData, &rel)
+ if err != nil {
+ plog.Fatalf("unmarshaling release metadata: %v", err)
+ }
+
+ return rel
+}
+
+func makeReleaseAMIsPublic(rel release.Release) bool {
+ atLeastOneTried := false
+ atLeastOnePassed := false
+ atLeastOneFailed := false
+ for _, archs := range rel.Architectures {
+ awsmedia := archs.Media.Aws
+ if awsmedia == nil {
+ continue
+ }
+ for region, ami := range awsmedia.Images {
+ atLeastOneTried = true
+
+ awsAPI, err := aws.New(&aws.Options{
+ CredentialsFile: awsCredentialsFile,
+ Profile:         specProfile,
+ Region:          region,
+ })
+ if err != nil {
+ plog.Warningf("creating AWS API for region %s to modify launch permissions: %v", region, err)
+ atLeastOneFailed = true
+ continue
+ }
+
+ plog.Noticef("making AMI %s in region %s public", ami.Image, region)
+ err = awsAPI.PublishImage(ami.Image)
+ if err != nil {
+ plog.Warningf("couldn't publish image in %v: %v", region, err)
+ atLeastOneFailed = true
+ continue
+ }
+
+ atLeastOnePassed = true
+ }
+ }
+
+ if !atLeastOneTried {
+ // if none were found, then we no-op
+ return false
+ } else if !atLeastOnePassed {
+ // if none passed, then it's likely a more fundamental issue like wrong
+ // permissions or API usage, etc... let's just hard fail in that case
+ plog.Fatal("failed to make AMIs public in all regions")
+ }
+ // all passed or some failed
+ return atLeastOneFailed
+}
+
+func modifyReleaseMetadataIndex(api *aws.API, rel release.Release) {
// Note we use S3 directly here instead of
// FetchAndParseCanonicalReleaseIndex(), since that one uses the
// CloudFronted URL and we need to be sure we're operating on the latest
// version. Plus we need S3 creds anyway later on to push the modified
// release index back.
- path := filepath.Join("prod", "streams", specStream, "releases.json")
+ bucket, prefix := getBucketAndStreamPrefix()
+ path := filepath.Join(prefix, "releases.json")
data, err := func() ([]byte, error) {
- f, err := api.DownloadFile(specBucket, path)
+ f, err := api.DownloadFile(bucket, path)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "NoSuchKey" {
@@ -152,29 +238,13 @@ func modifyReleaseMetadataIndex() {
plog.Fatalf("unmarshaling release metadata json: %v", err)
}
- releasePath := filepath.Join("prod", "streams", specStream, "builds", specVersion, "release.json")
+ // XXX: switch the URL to be relative so we don't have to hardcode its final location?
+ releasePath := filepath.Join(prefix, "builds", specVersion, "release.json")
url, err := url.Parse(fmt.Sprintf("https://builds.coreos.fedoraproject.org/%s", releasePath))
if err != nil {
plog.Fatalf("creating metadata url: %v", err)
}
- releaseFile, err := api.DownloadFile(specBucket, releasePath)
- if err != nil {
- plog.Fatalf("downloading release metadata at %s: %v", releasePath, err)
- }
- defer releaseFile.Close()
-
- releaseData, err := io.ReadAll(releaseFile)
- if err != nil {
- plog.Fatalf("reading release metadata: %v", err)
- }
-
- var rel release.Release
- err = json.Unmarshal(releaseData, &rel)
- if err != nil {
- plog.Fatalf("unmarshaling release metadata: %v", err)
- }
-
var commits []release.IndexReleaseCommit
for arch, vals := range rel.Architectures {
commits = append(commits, release.IndexReleaseCommit{
@@ -198,6 +268,7 @@ func modifyReleaseMetadataIndex() {
comp := compareCommits(rel.Commits, newIdxRelease.Commits)
if comp == 0 {
// the build is already the latest release, exit
+ plog.Notice("build is already present and is the latest release")
return
} else if comp == -1 {
// the build is present and contains a subset of the new release data,
@@ -211,28 +282,6 @@ func modifyReleaseMetadataIndex() {
}
}
- for _, archs := range rel.Architectures {
- awsmedia := archs.Media.Aws
- if awsmedia == nil {
- continue
- }
- for region, ami := range awsmedia.Images {
- aws_api, err := aws.New(&aws.Options{
- CredentialsFile: awsCredentialsFile,
- Profile: specProfile,
- Region: region,
- })
- if err != nil {
- plog.Fatalf("creating AWS API for modifying launch permissions: %v", err)
- }
-
- err = aws_api.PublishImage(ami.Image)
- if err != nil {
- plog.Fatalf("couldn't publish image in %v: %v", region, err)
- }
- }
- }
-
releaseIdx.Releases = append(releaseIdx.Releases, newIdxRelease)
releaseIdx.Metadata.LastModified = time.Now().UTC().Format("2006-01-02T15:04:05Z")
@@ -246,7 +295,7 @@ func modifyReleaseMetadataIndex() {
// we don't want this to be cached for very long so that e.g. Cincinnati picks it up quickly
var releases_max_age = 60 * 5
- err = api.UploadObjectExt(bytes.NewReader(out), specBucket, path, true, specPolicy, aws.ContentTypeJSON, releases_max_age)
+ err = api.UploadObjectExt(bytes.NewReader(out), bucket, path, true, "public-read", aws.ContentTypeJSON, releases_max_age)
if err != nil {
plog.Fatalf("uploading release metadata json: %v", err)
}
diff --git a/mantle/harness/_example/doc.go b/mantle/harness/_example/doc.go
index 87a25e939aa5da31aa868a24e1033ad2ac69ab27..e75c1448b454bb153af45495f675fbff4adf66cc 100644
--- a/mantle/harness/_example/doc.go
+++ b/mantle/harness/_example/doc.go
@@ -27,5 +27,4 @@
// --- SKIP: SkipIt (0.00s)
// main.go:40: Missing "TEST_DATA_else" in environment.
// PASS
-//
package main
diff --git a/mantle/harness/doc.go b/mantle/harness/doc.go
index 7a1d0778136a2d46107648be3a2e8c8ae9123dc6..4650dcfbe00b6161f24ed3498fa11099c03d4ee1 100644
--- a/mantle/harness/doc.go
+++ b/mantle/harness/doc.go
@@ -23,27 +23,28 @@
//
// Tests may be skipped if not applicable with a call to
// the Skip method of *H:
-// func NeedsSomeData(h *harness.H) {
-// if os.Getenv("SOME_DATA") == "" {
-// h.Skip("skipping test due to missing SOME_DATA")
-// }
-// ...
-// }
//
-// Subtests
+// func NeedsSomeData(h *harness.H) {
+// if os.Getenv("SOME_DATA") == "" {
+// h.Skip("skipping test due to missing SOME_DATA")
+// }
+// ...
+// }
+//
+// # Subtests
//
// The Run method of H allow defining subtests,
// without having to define separate functions for each. This enables uses
// like table-driven and hierarchical tests.
// It also provides a way to share common setup and tear-down code:
//
-// func Foo(h *harness.H) {
-// //
-// h.Run("A=1", func(h *harness.H) { ... })
-// h.Run("A=2", func(h *harness.H) { ... })
-// h.Run("B=1", func(h *harness.H) { ... })
-// //
-// }
+// func Foo(h *harness.H) {
+// //
+// h.Run("A=1", func(h *harness.H) { ... })
+// h.Run("A=2", func(h *harness.H) { ... })
+// h.Run("B=1", func(h *harness.H) { ... })
+// //
+// }
//
// Each subtest has a unique name: the combination of the name
// of the top-level test and the sequence of names passed to Run, separated by
@@ -56,40 +57,40 @@
// empty expression matches any string.
// For example, using "matching" to mean "whose name contains":
//
-// go run foo.go -harness.run '' # Run all tests.
-// go run foo.go -harness.run Foo # Run top-level tests matching "Foo", such as "TestFooBar".
-// go run foo.go -harness.run Foo/A= # For top-level tests matching "Foo", run subtests matching "A=".
-// go run foo.go -harness.run /A=1 # For all top-level tests, run subtests matching "A=1".
+// go run foo.go -harness.run '' # Run all tests.
+// go run foo.go -harness.run Foo # Run top-level tests matching "Foo", such as "TestFooBar".
+// go run foo.go -harness.run Foo/A= # For top-level tests matching "Foo", run subtests matching "A=".
+// go run foo.go -harness.run /A=1 # For all top-level tests, run subtests matching "A=1".
//
// Subtests can also be used to control parallelism. A parent test will only
// complete once all of its subtests complete. In this example, all tests are
// run in parallel with each other, and only with each other, regardless of
// other top-level tests that may be defined:
//
-// func GroupedParallel(h *harness.H) {
-// for _, tc := range tests {
-// tc := tc // capture range variable
-// h.Run(tc.Name, func(h *harness.H) {
-// h.Parallel()
-// ...
-// })
-// }
-// }
+// func GroupedParallel(h *harness.H) {
+// for _, tc := range tests {
+// tc := tc // capture range variable
+// h.Run(tc.Name, func(h *harness.H) {
+// h.Parallel()
+// ...
+// })
+// }
+// }
//
// Run does not return until parallel subtests have completed, providing a way
// to clean up after a group of parallel tests:
//
-// func TeardownParallel(h *harness.H) {
-// // This Run will not return until the parallel tests finish.
-// h.Run("group", func(h *harness.H) {
-// h.Run("Test1", parallelTest1)
-// h.Run("Test2", parallelTest2)
-// h.Run("Test3", parallelTest3)
-// })
-// //
-// }
+// func TeardownParallel(h *harness.H) {
+// // This Run will not return until the parallel tests finish.
+// h.Run("group", func(h *harness.H) {
+// h.Run("Test1", parallelTest1)
+// h.Run("Test2", parallelTest2)
+// h.Run("Test3", parallelTest3)
+// })
+// //
+// }
//
-// Suite
+// # Suite
//
// Individual tests are grouped into a test suite in order to execute them.
// TODO: this part of the API deviates from the "testing" package and is TBD.
@@ -111,5 +112,4 @@
// }
// fmt.Println("PASS")
// }
-//
package harness
diff --git a/mantle/lang/natsort/cmp.go b/mantle/lang/natsort/cmp.go
index 4fb73072fbd54429fbf317f7fb1ffd2d06e4f7fd..029b04f57a7b4c0dacab3be22ed47ddd2fb2fb18 100644
--- a/mantle/lang/natsort/cmp.go
+++ b/mantle/lang/natsort/cmp.go
@@ -19,15 +19,14 @@
// Strings are sorted as usual, except that decimal integer substrings
// are compared on their numeric value. For example:
//
-// a < a0 < a1 < a1a < a1b < a2 < a10 < a20
+// a < a0 < a1 < a1a < a1b < a2 < a10 < a20
//
// All white space and control characters are ignored.
//
// Leading zeros are *not* ignored, which tends to give more
// reasonable results on decimal fractions:
//
-// 1.001 < 1.002 < 1.010 < 1.02 < 1.1 < 1.3
-//
+// 1.001 < 1.002 < 1.010 < 1.02 < 1.1 < 1.3
package natsort
func isDigit(s string, i int) bool {
diff --git a/mantle/lang/worker/group.go b/mantle/lang/worker/group.go
index f9d625943f5fb0edba08aeea12495c8147f5a33f..4360e4d05e5e8a7f0b8d064066ace3d7bf09d330 100644
--- a/mantle/lang/worker/group.go
+++ b/mantle/lang/worker/group.go
@@ -26,10 +26,10 @@ type Worker func(context.Context) error
// WorkerGroup is similar in principle to sync.WaitGroup but manages the
// Workers itself. This allows it to provide a few helpful features:
-// - Integration with the context library.
-// - Limit the number of concurrent Workers.
-// - Capture the errors returned by each Worker.
-// - Abort everything after a single Worker reports an error.
+// - Integration with the context library.
+// - Limit the number of concurrent Workers.
+// - Capture the errors returned by each Worker.
+// - Abort everything after a single Worker reports an error.
type WorkerGroup struct {
ctx context.Context
cancel context.CancelFunc
@@ -98,7 +98,6 @@ func (wg *WorkerGroup) Wait() error {
// if err := wg.Start(worker); err != nil {
// return wg.WaitError(err)
// }
-//
func (wg *WorkerGroup) WaitError(err error) error {
if werr := wg.Wait(); werr != nil {
return werr
diff --git a/mantle/network/ntp/protocol.go b/mantle/network/ntp/protocol.go
index d2e00601f587928377017f962fb79773061e871d..3b7f85e103d16e4a7e936a4f6573e6131a3870ab 100644
--- a/mantle/network/ntp/protocol.go
+++ b/mantle/network/ntp/protocol.go
@@ -32,12 +32,11 @@ var be = binary.BigEndian
// NTP Short Format
//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Seconds | Fraction |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Seconds | Fraction |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
type Short struct {
Seconds uint16
Fraction uint16
@@ -55,14 +54,13 @@ func (s *Short) unmarshalBinary(b []byte) {
// NTP Timestamp Format
//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Seconds |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Fraction |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Seconds |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Fraction |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
type Timestamp struct {
Seconds uint32
Fraction uint32
@@ -134,52 +132,51 @@ const (
// NTP Packet Header Format
//
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |LI | VN |Mode | Stratum | Poll | Precision |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Root Delay |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Root Dispersion |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Reference ID |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// + Reference Timestamp (64) +
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// + Origin Timestamp (64) +
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// + Receive Timestamp (64) +
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// + Transmit Timestamp (64) +
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// . .
-// . Extension Field 1 (variable) .
-// . .
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// . .
-// . Extension Field 2 (variable) .
-// . .
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | Key Identifier |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | |
-// | dgst (128) |
-// | |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |LI | VN |Mode | Stratum | Poll | Precision |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Delay |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Dispersion |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Reference ID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Reference Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Origin Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Receive Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Transmit Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 1 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 2 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Key Identifier |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// | dgst (128) |
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
type Header struct {
LeapIndicator LeapIndicator // 2 bits
VersionNumber VersionNumber // 3 bits
diff --git a/mantle/util/distros.go b/mantle/util/distros.go
index 85eda44084ac4629df5ff34cafd80bc06f7900e4..08383c7f12245ee71671e3c86a4f60ae9e196241 100644
--- a/mantle/util/distros.go
+++ b/mantle/util/distros.go
@@ -26,7 +26,7 @@ import (
// the path to an artifact (usually a disk image).
func TargetDistroFromName(artifact string) string {
basename := filepath.Base(artifact)
- if strings.HasPrefix(basename, "rhcos-") {
+ if strings.HasPrefix(basename, "rhcos-") || strings.HasPrefix(basename, "scos-") {
return "rhcos"
}
// For now, just assume fcos
diff --git a/mantle/util/repo.go b/mantle/util/repo.go
index 0dd96568a57f9a25e267856cba20a05c4a5de546..ac91fcb42f1125133b97d73563e19f0b031cdd52 100644
--- a/mantle/util/repo.go
+++ b/mantle/util/repo.go
@@ -72,7 +72,7 @@ func GetLocalFastBuildQemu() (string, error) {
}
for _, ent := range ents {
if strings.HasSuffix(ent.Name(), ".qcow2") {
- return filepath.Join(".cosa", ent.Name()), nil
+ return filepath.Join(fastBuildCosaDir, ent.Name()), nil
}
}
return "", nil
diff --git a/schema/generate-schema.sh b/schema/generate-schema.sh
index d8f4037fc5631f2d2d3d7cc41dcfeddc55282276..73b50c7f864da55d01dbd447b7340598bc6bb7ed 100755
--- a/schema/generate-schema.sh
+++ b/schema/generate-schema.sh
@@ -1,22 +1,31 @@
-#!/bin/bash -xe
-mydir=$(readlink -f $(dirname $(basename $0)))
+#!/bin/bash
+set -euo pipefail
+topdir=$(git rev-parse --show-toplevel)
+mydir=$(dirname "$(readlink -f "$0")")
tdir="${mydir}/tmp"
mkdir -p "${tdir}"
trap "rm -rfv ${tdir}" EXIT
-export GOBIN="$(readlink -f ../tools/bin)"
-if [ ! -x "${GOBIN}/schematyper" ]; then
- make -C ../tools
-fi
-
schema_version="v1"
-schema_json="../schema/${schema_version}.json"
+schema_json="${schema_version}.json"
+digest=$(sha256sum "${schema_json}" | awk '{print $1}')
echo "Generating COSA Schema ${schema_version}"
out="${tdir}/cosa_${schema_version}.go"
-"${GOBIN}/schematyper" \
- "${schema_version}.json" \
+schematyper=
+if test -f "${topdir}/bin/schematyper"; then
+    schematyper="${topdir}/bin/schematyper"
+else
+    schematyper=$(command -v schematyper || true)
+fi
+if test -z "${schematyper}"; then
+    env GOBIN="${topdir}/bin" go install github.com/idubinskiy/schematyper@latest
+    schematyper="${topdir}/bin/schematyper"
+fi
+
+"${schematyper}" \
+    "${topdir}/src/${schema_version}.json" \
-o "${out}" \
--package="builds" \
--root-type=Build \
@@ -24,16 +33,17 @@ out="${tdir}/cosa_${schema_version}.go"
# Avoid having a filename in generated code since it
# can vary depending on local checkout paths.
-sed -e "s|^// generated.*|// generated by 'make schema'|g" -i ${tdir}/cosa_v1.go
+sed -e "s|^// generated.*|// generated by 'make schema'\n// source hash: ${digest}|g" -i ${tdir}/cosa_v1.go
cat > "${tdir}/schema_doc.go" <