From 2ee844641471e4d41e714ce388f95a59ecb7c315 Mon Sep 17 00:00:00 2001 From: wangyueliang Date: Tue, 27 Feb 2024 14:46:20 +0800 Subject: [PATCH] sync tons of qemu options optimization from upstream [upstream] f2a0924446a90630e455c198a5f465170eb22892 33cc9fd5c8d770915545728d0761ec87b69182e7 551e93e7d2bc2b54ae7bdd883c1c79c975c00b9c c8675841f60fddf3c43193eaee6215d134f40406 7a1f61e0c5990be0c4c858af4c0292abbbca91f3 66cff73f2533e5db78d17ce706f043579b4e4c2f 53be1b6c717cc145f866005bc488b44089a78448 0c2fe2768d8efcafde07e22b9d0978e7ef487c65 46269612f7c1d6ae6407d187ca5c927a9db698df 854e3c0d04000604917a646641872dcfefedcbb7 ea9f0220ec00142b8ddc9d2a5834ecd01bf21a45 54914bf2c9ba045fe2b7775fc2b198b939eb1ca0 025ba8570b296d96feadd867459ec8ada6808a76 5887a708dd63e0c5c93d429eae5b9c91044f90fa a1c54a9671e584802ed6d7eee53e68f1c2a4280f 8c91ec9f1b54871a1f70354d0a6c9c655d75e4d9 58397a09feb88d322c7574ada78894af4d75ca5b 7a3e9a537cd5e53dadff281219c1e80d85e10e14 008e04c322e99cdb769cdb36c010810c8625376b 970ebdcb1e76ebc6d3fde8c6f960a34548f4238c e3cbf040279375e6869adcb7fb253d5839edb714 b3facd95243656dd2292437caee99f81f523e657 ed3b488dd8cdeee9b50a5278a23ec24d1e6d4c62 --- docs/cosa/run.md | 9 +- mantle/cmd/kola/options.go | 11 + mantle/cmd/kola/qemuexec.go | 70 ++++-- mantle/platform/api/aliyun/api.go | 11 +- mantle/platform/api/aws/images.go | 21 +- mantle/platform/api/aws/network.go | 12 +- mantle/platform/api/azure/api.go | 137 ++++------ mantle/platform/api/azure/groups.go | 49 ++-- mantle/platform/api/azure/image.go | 38 ++- mantle/platform/api/azure/instance.go | 111 +++++---- mantle/platform/api/azure/network.go | 139 ++++++----- mantle/platform/api/azure/options.go | 4 +- mantle/platform/api/azure/storage.go | 170 ++++++++++++- mantle/platform/api/azure/storage_mit.go | 260 ------------------- mantle/platform/api/gcloud/api.go | 19 +- mantle/platform/api/gcloud/compute.go | 68 ++++- mantle/platform/api/gcloud/image.go | 43 +++- mantle/platform/api/ibmcloud/s3.go | 2 +- mantle/platform/api/openstack/api.go | 3 + mantle/platform/machine/gcloud/cluster.go | 13 +- mantle/platform/machine/qemu/cluster.go | 31 ++- mantle/platform/machine/qemu/flight.go | 9 +- mantle/platform/qemu.go | 290 ++++++++++++++++++---- mantle/platform/qmp_util.go | 3 +- mantle/util/common.go | 36 +++ src/libguestfish.sh | 6 +- 26 files changed, 953 insertions(+), 612 deletions(-) delete mode 100644 mantle/platform/api/azure/storage_mit.go diff --git a/docs/cosa/run.md b/docs/cosa/run.md index e21e3223..5a01c143 100644 --- a/docs/cosa/run.md +++ b/docs/cosa/run.md @@ -83,7 +83,14 @@ lrwxrwxrwx. 1 root root 10 Jan 28 19:23 virtio-primary-disk-part4 -> ../../vdc4 ``` Additional disks CLI arguments support optional flags using the `--add-disk -2G:OPT1,OPT2,...` syntax. An example is `mpath`, discussed below. +2G:OPT1,OPT2,...` syntax. Supported options are: + +- `mpath`: enables multipathing for the disk (see below for details). +- `4k`: sets the disk as 4Kn (4096 physical sector size) +- `channel=CHANNEL`: set the channel type (e.g. `virtio`, `nvme`) +- `serial=NAME`: sets the disk serial; this can then be used to customize the + default `diskN` naming documented above (e.g. 
`serial=foobar` will make the + device show up as `/dev/disk/by-id/virtio-foobar`) ## Additional kernel arguments diff --git a/mantle/cmd/kola/options.go b/mantle/cmd/kola/options.go index fa7801a5..a254a1cf 100644 --- a/mantle/cmd/kola/options.go +++ b/mantle/cmd/kola/options.go @@ -146,6 +146,7 @@ func init() { sv(&kola.QEMUOptions.Firmware, "qemu-firmware", "", "Boot firmware: bios,uefi,uefi-secure (default bios)") sv(&kola.QEMUOptions.DiskImage, "qemu-image", "", "path to CoreOS disk image") sv(&kola.QEMUOptions.DiskSize, "qemu-size", "", "Resize target disk via qemu-img resize [+]SIZE") + sv(&kola.QEMUOptions.DriveOpts, "qemu-drive-opts", "", "Arbitrary options to append to qemu -drive for primary disk") sv(&kola.QEMUOptions.Memory, "qemu-memory", "", "Default memory size in MB") bv(&kola.QEMUOptions.NbdDisk, "qemu-nbd-socket", false, "Present the disks over NBD socket to qemu") bv(&kola.QEMUOptions.MultiPathDisk, "qemu-multipath", false, "Enable multiple paths for the main disk") @@ -155,6 +156,10 @@ func init() { sv(&kola.QEMUIsoOptions.IsoPath, "qemu-iso", "", "path to CoreOS ISO image") bv(&kola.QEMUIsoOptions.AsDisk, "qemu-iso-as-disk", false, "attach ISO image as regular disk") + // s390x secex specific options + bv(&kola.QEMUOptions.SecureExecution, "qemu-secex", false, "Run IBM Secure Execution Image") + sv(&kola.QEMUOptions.SecureExecutionIgnitionPubKey, "qemu-secex-ignition-pubkey", "", "Path to Ignition GPG Public Key") + sv(&kola.QEMUOptions.SecureExecutionHostKey, "qemu-secex-hostkey", "", "Path to Secure Execution HKD certificate") } // Sync up the command line options if there is dependency @@ -333,6 +338,12 @@ func syncOptions() error { func syncCosaOptions() error { switch kolaPlatform { case "qemu": + if kola.QEMUOptions.SecureExecution && kola.QEMUOptions.DiskImage == "" && kola.CosaBuild.Meta.BuildArtifacts.SecureExecutionQemu != nil { + kola.QEMUOptions.DiskImage = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.SecureExecutionQemu.Path) + } + if kola.QEMUOptions.SecureExecutionIgnitionPubKey == "" && kola.CosaBuild.Meta.BuildArtifacts.SecureExecutionIgnitionPubKey != nil { + kola.QEMUOptions.SecureExecutionIgnitionPubKey = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.SecureExecutionIgnitionPubKey.Path) + } if kola.QEMUOptions.DiskImage == "" && kola.CosaBuild.Meta.BuildArtifacts.Qemu != nil { kola.QEMUOptions.DiskImage = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.Qemu.Path) } diff --git a/mantle/cmd/kola/qemuexec.go b/mantle/cmd/kola/qemuexec.go index 7bc2915d..2e0f4c8b 100644 --- a/mantle/cmd/kola/qemuexec.go +++ b/mantle/cmd/kola/qemuexec.go @@ -122,6 +122,41 @@ func parseBindOpt(s string) (string, string, error) { return parts[0], parts[1], nil } +// buildDiskFromOptions generates a disk image template using the process-global +// defaults that were parsed from command line arguments. +func buildDiskFromOptions() *platform.Disk { + channel := "virtio" + if kola.QEMUOptions.Nvme { + channel = "nvme" + } + sectorSize := 0 + if kola.QEMUOptions.Native4k { + sectorSize = 4096 + } + options := []string{} + if kola.QEMUOptions.DriveOpts != "" { + options = append(options, strings.Split(kola.QEMUOptions.DriveOpts, ",")...) + } + // If there was no disk image specified and no size then just + // default to an arbitrary value of 12G for the blank disk image. 
+ size := kola.QEMUOptions.DiskSize + if kola.QEMUOptions.DiskImage == "" && kola.QEMUOptions.DiskSize == "" { + size = "12G" + } + // Build the disk definition. Note that if kola.QEMUOptions.DiskImage is + // "" we'll just end up with a blank disk image, which is what we want. + disk := &platform.Disk{ + BackingFile: kola.QEMUOptions.DiskImage, + Channel: channel, + Size: size, + SectorSize: sectorSize, + DriveOpts: options, + MultiPathDisk: kola.QEMUOptions.MultiPathDisk, + NbdDisk: kola.QEMUOptions.NbdDisk, + } + return disk +} + func runQemuExec(cmd *cobra.Command, args []string) error { var err error @@ -274,31 +309,24 @@ func runQemuExec(cmd *cobra.Command, args []string) error { builder.Firmware = kola.QEMUOptions.Firmware } if kola.QEMUOptions.DiskImage != "" { - channel := "virtio" - if kola.QEMUOptions.Nvme { - channel = "nvme" - } - sectorSize := 0 - if kola.QEMUOptions.Native4k { - sectorSize = 4096 + if err := builder.AddBootDisk(buildDiskFromOptions()); err != nil { + return err } - err = builder.AddBootDisk(&platform.Disk{ - BackingFile: kola.QEMUOptions.DiskImage, - Channel: channel, - Size: kola.QEMUOptions.DiskSize, - SectorSize: sectorSize, - MultiPathDisk: kola.QEMUOptions.MultiPathDisk, - NbdDisk: kola.QEMUOptions.NbdDisk, - }) + if err != nil { return err } } if kola.QEMUIsoOptions.IsoPath != "" { - err := builder.AddIso(kola.QEMUIsoOptions.IsoPath, "", kola.QEMUIsoOptions.AsDisk) + err := builder.AddIso(kola.QEMUIsoOptions.IsoPath, "bootindex=3", kola.QEMUIsoOptions.AsDisk) if err != nil { return err } + // TODO: if kola.QEMUOptions.DiskImage != "" & kola.QEMUIsoOptions.IsoPath != "", still add a blank disk? + // Add a blank disk (this is a disk we can install to) + if err := builder.AddBootDisk(buildDiskFromOptions()); err != nil { + return err + } } builder.Hostname = hostname // for historical reasons, both --memory and --qemu-memory are supported @@ -310,6 +338,8 @@ func runQemuExec(cmd *cobra.Command, args []string) error { return errors.Wrapf(err, "parsing memory option") } builder.Memory = int(parsedMem) + } else if kola.QEMUOptions.SecureExecution { + builder.MemoryMiB = 4096 // SE needs at least 4GB } if err = builder.AddDisksFromSpecs(addDisks); err != nil { return err @@ -333,6 +363,14 @@ func runQemuExec(cmd *cobra.Command, args []string) error { builder.ConsoleFile = consoleFile builder.Append(args...) + // IBM Secure Execution + if kola.QEMUOptions.SecureExecution { + err := builder.SetSecureExecution(kola.QEMUOptions.SecureExecutionIgnitionPubKey, kola.QEMUOptions.SecureExecutionHostKey, config) + if err != nil { + return err + } + } + if devshell && !devshellConsole { return runDevShellSSH(ctx, builder, config, sshCommand) } diff --git a/mantle/platform/api/aliyun/api.go b/mantle/platform/api/aliyun/api.go index 1b19d1b2..aac9e3c4 100644 --- a/mantle/platform/api/aliyun/api.go +++ b/mantle/platform/api/aliyun/api.go @@ -239,6 +239,7 @@ func (a *API) ImportImage(format, bucket, object, image_size, device, name, desc // Wait for the import image task and return the image id. See also similar // code in AWS' finishSnapshotTask. 
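// The lastStatus variable introduced below only drives logging: each status
// transition of the import task is reported once rather than on every poll.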
func (a *API) finishImportImageTask(importImageResponse *ecs.ImportImageResponse) (string, error) { + lastStatus := "" importDone := func(taskId string) (bool, error) { request := ecs.CreateDescribeTasksRequest() request.SetConnectTimeout(defaultConnectTimeout) @@ -253,7 +254,13 @@ func (a *API) finishImportImageTask(importImageResponse *ecs.ImportImageResponse return false, fmt.Errorf("expected result about one task, got %v", res.TaskSet.Task) } - switch res.TaskSet.Task[0].TaskStatus { + currentStatus := res.TaskSet.Task[0].TaskStatus + if currentStatus != lastStatus { + plog.Infof("task %v transitioned to status: %v", taskId, currentStatus) + lastStatus = currentStatus + } + + switch currentStatus { case "Finished": return true, nil case "Processing": @@ -267,7 +274,7 @@ func (a *API) finishImportImageTask(importImageResponse *ecs.ImportImageResponse case "Failed": return false, fmt.Errorf("task %v failed", taskId) default: - return false, fmt.Errorf("unexpected status for task %v: %v", taskId, res.TaskSet.Task[0].TaskStatus) + return false, fmt.Errorf("unexpected status for task %v: %v", taskId, currentStatus) } } diff --git a/mantle/platform/api/aws/images.go b/mantle/platform/api/aws/images.go index c465ad44..70779a48 100644 --- a/mantle/platform/api/aws/images.go +++ b/mantle/platform/api/aws/images.go @@ -329,21 +329,28 @@ func (a *API) CreateImportRole(bucket string) error { return nil } -func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, description string, architecture string) (string, error) { +func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, description string, architecture string, volumetype string, imdsv2Only bool, X86BootMode string) (string, error) { var awsArch string + var bootmode string if architecture == "" { architecture = runtime.GOARCH } switch architecture { case "amd64", "x86_64": awsArch = ec2.ArchitectureTypeX8664 + bootmode = X86BootMode case "arm64", "aarch64": awsArch = ec2.ArchitectureTypeArm64 + bootmode = "uefi" default: return "", fmt.Errorf("unsupported ec2 architecture %q", architecture) } - return a.createImage(&ec2.RegisterImageInput{ + // default to gp3 + if volumetype == "" { + volumetype = "gp3" + } + params := &ec2.RegisterImageInput{ Name: aws.String(name), Description: aws.String(description), Architecture: aws.String(awsArch), @@ -356,7 +363,7 @@ func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, d SnapshotId: aws.String(snapshotID), DeleteOnTermination: aws.Bool(true), VolumeSize: aws.Int64(int64(diskSizeGiB)), - VolumeType: aws.String("gp2"), + VolumeType: aws.String(volumetype), }, }, { @@ -366,7 +373,13 @@ func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, d }, EnaSupport: aws.Bool(true), SriovNetSupport: aws.String("simple"), - }) + BootMode: aws.String(bootmode), + } + if imdsv2Only { + params.ImdsSupport = aws.String("v2.0") + } + + return a.createImage(params) } func (a *API) deregisterImageIfExists(name string) error { diff --git a/mantle/platform/api/aws/network.go b/mantle/platform/api/aws/network.go index c53df3e2..e5eb8c54 100644 --- a/mantle/platform/api/aws/network.go +++ b/mantle/platform/api/aws/network.go @@ -20,6 +20,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/coreos/coreos-assembler/mantle/util" ) // getSecurityGroupID gets a security group matching the given name. 
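Stepping back to the CreateHVMImage rework in images.go above: the function now takes a volume type (falling back to gp3), an IMDSv2-only switch, and an x86 boot mode that is only honoured for x86_64 registrations. The sketch below illustrates that argument resolution only and is not code from this patch; the function name, variable names, and the values in main are invented.

```go
package main

import "fmt"

// resolveImageParams mirrors, in standalone form, how CreateHVMImage picks the
// EC2 architecture, boot mode and EBS volume type: an empty volume type becomes
// gp3, aarch64 always registers as UEFI, and the caller-supplied x86 boot mode
// is used only for x86_64.
func resolveImageParams(arch, volumeType, x86BootMode string) (string, string, string, error) {
	if volumeType == "" {
		volumeType = "gp3"
	}
	switch arch {
	case "amd64", "x86_64":
		return "x86_64", x86BootMode, volumeType, nil
	case "arm64", "aarch64":
		return "arm64", "uefi", volumeType, nil
	default:
		return "", "", "", fmt.Errorf("unsupported ec2 architecture %q", arch)
	}
}

func main() {
	arch, boot, vol, _ := resolveImageParams("aarch64", "", "uefi-preferred")
	fmt.Println(arch, boot, vol) // arm64 uefi gp3
}
```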
@@ -246,8 +248,16 @@ func (a *API) createInternetGateway(name, vpcId string) (string, error) { // createSubnets creates a subnet in each availability zone for the region // that is associated with the given VPC associated with the given RouteTable +// NOTE: we ignore local and wavelength availability zones here. func (a *API) createSubnets(name, vpcId, routeTableId string) error { - azs, err := a.ec2.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{}) + azs, err := a.ec2.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("zone-type"), + Values: []*string{util.StrToPtr("availability-zone")}, + }, + }, + }) if err != nil { return fmt.Errorf("retrieving availability zones: %v", err) } diff --git a/mantle/platform/api/azure/api.go b/mantle/platform/api/azure/api.go index a098a49b..e09ede8b 100644 --- a/mantle/platform/api/azure/api.go +++ b/mantle/platform/api/azure/api.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2016 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,78 +19,43 @@ import ( "fmt" "math/rand" "os" - "os/user" - "path/filepath" "strings" "time" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/resources/resources" - armStorage "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/coreos/pkg/capnslog" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" - internalAuth "github.com/coreos/coreos-assembler/mantle/auth" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/coreos-assembler/mantle", "platform/api/azure") + "github.com/coreos/coreos-assembler/mantle/auth" ) type API struct { - rgClient resources.GroupsClient - imgClient compute.ImagesClient - compClient compute.VirtualMachinesClient - netClient network.VirtualNetworksClient - subClient network.SubnetsClient - ipClient network.PublicIPAddressesClient - intClient network.InterfacesClient - accClient armStorage.AccountsClient + azIdCred *azidentity.DefaultAzureCredential + rgClient *armresources.ResourceGroupsClient + imgClient *armcompute.ImagesClient + compClient *armcompute.VirtualMachinesClient + netClient *armnetwork.VirtualNetworksClient + subClient *armnetwork.SubnetsClient + ipClient *armnetwork.PublicIPAddressesClient + intClient *armnetwork.InterfacesClient + accClient *armstorage.AccountsClient opts *Options } // New creates a new Azure client. If no publish settings file is provided or // can't be parsed, an anonymous client is created. 
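// Note: with the credentials-file flow below there is no anonymous fallback any
// more. New reads the file named by Options.AzureCredentials, exports its client,
// tenant and secret values to the environment for DefaultAzureCredential, and
// returns an error if the file cannot be read.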
func New(opts *Options) (*API, error) { - if opts.StorageEndpointSuffix == "" { - opts.StorageEndpointSuffix = storage.DefaultBaseURL - } - - profiles, err := internalAuth.ReadAzureProfile(opts.AzureProfile) + azCreds, err := auth.ReadAzureCredentials(opts.AzureCredentials) if err != nil { - return nil, fmt.Errorf("couldn't read Azure profile: %v", err) + return nil, fmt.Errorf("couldn't read Azure Credentials file: %v", err) } - subOpts := profiles.SubscriptionOptions(opts.AzureSubscription) - if subOpts == nil { - return nil, fmt.Errorf("Azure subscription named %q doesn't exist in %q", opts.AzureSubscription, opts.AzureProfile) - } - - if os.Getenv("AZURE_AUTH_LOCATION") == "" { - if opts.AzureAuthLocation == "" { - user, err := user.Current() - if err != nil { - return nil, err - } - opts.AzureAuthLocation = filepath.Join(user.HomeDir, internalAuth.AzureAuthPath) - } - // TODO: Move to Flight once built to allow proper unsetting - os.Setenv("AZURE_AUTH_LOCATION", opts.AzureAuthLocation) - } - - if opts.SubscriptionID == "" { - opts.SubscriptionID = subOpts.SubscriptionID - } - - if opts.SubscriptionName == "" { - opts.SubscriptionName = subOpts.SubscriptionName - } - - if opts.StorageEndpointSuffix == "" { - opts.StorageEndpointSuffix = subOpts.StorageEndpointSuffix - } + opts.SubscriptionID = azCreds.SubscriptionID + os.Setenv("AZURE_CLIENT_ID", azCreds.ClientID) + os.Setenv("AZURE_TENANT_ID", azCreds.TenantID) + os.Setenv("AZURE_CLIENT_SECRET", azCreds.ClientSecret) api := &API{ opts: opts, @@ -103,43 +69,49 @@ func New(opts *Options) (*API, error) { } func (a *API) SetupClients() error { - auther, err := auth.GetClientSetup(resources.DefaultBaseURI) + var err error + a.azIdCred, err = azidentity.NewDefaultAzureCredential(nil) if err != nil { return err } - a.rgClient = resources.NewGroupsClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.rgClient.Authorizer = auther - auther, err = auth.GetClientSetup(compute.DefaultBaseURI) + a.rgClient, err = armresources.NewResourceGroupsClient(a.opts.SubscriptionID, a.azIdCred, nil) if err != nil { return err } - a.imgClient = compute.NewImagesClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.imgClient.Authorizer = auther - a.compClient = compute.NewVirtualMachinesClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.compClient.Authorizer = auther - auther, err = auth.GetClientSetup(network.DefaultBaseURI) + a.imgClient, err = armcompute.NewImagesClient(a.opts.SubscriptionID, a.azIdCred, nil) if err != nil { return err } - a.netClient = network.NewVirtualNetworksClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.netClient.Authorizer = auther - a.subClient = network.NewSubnetsClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.subClient.Authorizer = auther - a.ipClient = network.NewPublicIPAddressesClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.ipClient.Authorizer = auther - a.intClient = network.NewInterfacesClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.intClient.Authorizer = auther - - auther, err = auth.GetClientSetup(armStorage.DefaultBaseURI) + + a.compClient, err = armcompute.NewVirtualMachinesClient(a.opts.SubscriptionID, a.azIdCred, nil) if err != nil { return err } - a.accClient = armStorage.NewAccountsClientWithBaseURI(auther.BaseURI, auther.SubscriptionID) - a.accClient.Authorizer = auther - return nil + a.netClient, err = armnetwork.NewVirtualNetworksClient(a.opts.SubscriptionID, a.azIdCred, nil) + if err != nil { + return err + } + + a.subClient, err = 
armnetwork.NewSubnetsClient(a.opts.SubscriptionID, a.azIdCred, nil) + if err != nil { + return err + } + + a.ipClient, err = armnetwork.NewPublicIPAddressesClient(a.opts.SubscriptionID, a.azIdCred, nil) + if err != nil { + return err + } + + a.intClient, err = armnetwork.NewInterfacesClient(a.opts.SubscriptionID, a.azIdCred, nil) + if err != nil { + return err + } + + a.accClient, err = armstorage.NewAccountsClient(a.opts.SubscriptionID, a.azIdCred, nil) + return err } func randomName(prefix string) string { @@ -151,23 +123,22 @@ func randomName(prefix string) string { func (a *API) GC(gracePeriod time.Duration) error { durationAgo := time.Now().Add(-1 * gracePeriod) - listGroups, err := a.ListResourceGroups("") + resourceGroups, err := a.ListResourceGroups() if err != nil { return fmt.Errorf("listing resource groups: %v", err) } - for _, l := range *listGroups.Value { + for _, l := range resourceGroups { if strings.HasPrefix(*l.Name, "kola-cluster") { terminate := false - if l.Tags == nil || (*l.Tags)["createdAt"] == nil { + if l.Tags == nil || l.Tags["createdAt"] == nil { // If the group name starts with kola-cluster and has // no tags OR no createdAt then it failed to properly // get created and we should clean it up. // https://github.com/coreos/coreos-assembler/issues/3057 terminate = true } else { - createdAt := *(*l.Tags)["createdAt"] - timeCreated, err := time.Parse(time.RFC3339, createdAt) + timeCreated, err := time.Parse(time.RFC3339, *l.Tags["createdAt"]) if err != nil { return fmt.Errorf("error parsing time: %v", err) } diff --git a/mantle/platform/api/azure/groups.go b/mantle/platform/api/azure/groups.go index f8c9b568..e42a159a 100644 --- a/mantle/platform/api/azure/groups.go +++ b/mantle/platform/api/azure/groups.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,24 +16,24 @@ package azure import ( + "context" "time" - "github.com/Azure/azure-sdk-for-go/arm/resources/resources" - - "github.com/coreos/coreos-assembler/mantle/util" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" ) func (a *API) CreateResourceGroup(prefix string) (string, error) { name := randomName(prefix) tags := map[string]*string{ - "createdAt": util.StrToPtr(time.Now().Format(time.RFC3339)), - "createdBy": util.StrToPtr("mantle"), + "createdAt": to.Ptr(time.Now().Format(time.RFC3339)), + "createdBy": to.Ptr("mantle"), } - _, err := a.rgClient.CreateOrUpdate(name, resources.Group{ - Location: &a.opts.Location, - Tags: &tags, - }) + _, err := a.rgClient.CreateOrUpdate(context.Background(), name, armresources.ResourceGroup{ + Location: to.Ptr(a.opts.Location), + Tags: tags, + }, nil) if err != nil { return "", err } @@ -41,18 +42,38 @@ func (a *API) CreateResourceGroup(prefix string) (string, error) { } func (a *API) TerminateResourceGroup(name string) error { - resp, err := a.rgClient.CheckExistence(name) + resp, err := a.rgClient.CheckExistence(context.Background(), name, nil) if err != nil { return err } - if resp.StatusCode != 204 { + if !resp.Success { return nil } - _, err = a.rgClient.Delete(name, nil) + // Request the delete and wait until the resource group is cleaned up. 
+ ctx := context.Background() + poller, err := a.rgClient.BeginDelete(ctx, name, nil) + if err != nil { + return err + } + + _, err = poller.PollUntilDone(ctx, nil) + return err } -func (a *API) ListResourceGroups(filter string) (resources.GroupListResult, error) { - return a.rgClient.List(filter, nil) +func (a *API) ListResourceGroups() ([]*armresources.ResourceGroup, error) { + ctx := context.Background() + + resultPager := a.rgClient.NewListPager(nil) + + resourceGroups := make([]*armresources.ResourceGroup, 0) + for resultPager.More() { + pageResp, err := resultPager.NextPage(ctx) + if err != nil { + return nil, err + } + resourceGroups = append(resourceGroups, pageResp.ResourceGroupListResult.Value...) + } + return resourceGroups, nil } diff --git a/mantle/platform/api/azure/image.go b/mantle/platform/api/azure/image.go index 056642a5..2df2b10a 100644 --- a/mantle/platform/api/azure/image.go +++ b/mantle/platform/api/azure/image.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2016 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,32 +16,45 @@ package azure import ( - "github.com/Azure/azure-sdk-for-go/arm/compute" + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" ) -func (a *API) CreateImage(name, resourceGroup, blobURI string) (compute.Image, error) { - _, err := a.imgClient.CreateOrUpdate(resourceGroup, name, compute.Image{ +func (a *API) CreateImage(name, resourceGroup, blobURI string) (armcompute.Image, error) { + ctx := context.Background() + poller, err := a.imgClient.BeginCreateOrUpdate(ctx, resourceGroup, name, armcompute.Image{ Name: &name, Location: &a.opts.Location, - ImageProperties: &compute.ImageProperties{ - StorageProfile: &compute.ImageStorageProfile{ - OsDisk: &compute.ImageOSDisk{ - OsType: compute.Linux, - OsState: compute.Generalized, + Properties: &armcompute.ImageProperties{ + HyperVGeneration: to.Ptr(armcompute.HyperVGenerationTypesV1), + StorageProfile: &armcompute.ImageStorageProfile{ + OSDisk: &armcompute.ImageOSDisk{ + OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), + OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), BlobURI: &blobURI, }, }, }, }, nil) if err != nil { - return compute.Image{}, err + return armcompute.Image{}, err } - - return a.imgClient.Get(resourceGroup, name, "") + resp, err := poller.PollUntilDone(context.Background(), nil) + if err != nil { + return armcompute.Image{}, err + } + return resp.Image, nil } // DeleteImage removes Azure image func (a *API) DeleteImage(name, resourceGroup string) error { - _, err := a.imgClient.Delete(resourceGroup, name, nil) + ctx := context.Background() + poller, err := a.imgClient.BeginDelete(ctx, resourceGroup, name, nil) + if err != nil { + return err + } + _, err = poller.PollUntilDone(ctx, nil) return err } diff --git a/mantle/platform/api/azure/instance.go b/mantle/platform/api/azure/instance.go index 6042ef80..8a3ef461 100644 --- a/mantle/platform/api/azure/instance.go +++ b/mantle/platform/api/azure/instance.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2018 CoreOS, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +16,7 @@ package azure import ( + "context" "crypto/rand" "encoding/base64" "fmt" @@ -24,8 +26,9 @@ import ( "regexp" "time" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" "github.com/coreos/coreos-assembler/mantle/util" ) @@ -38,7 +41,15 @@ type Machine struct { PublicIPName string } -func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, ip *network.PublicIPAddress, nic *network.Interface) compute.VirtualMachine { +func (a *API) getInstance(name, resourceGroup string) (armcompute.VirtualMachine, error) { + resp, err := a.compClient.Get(context.Background(), resourceGroup, name, &armcompute.VirtualMachinesClientGetOptions{Expand: to.Ptr(armcompute.InstanceViewTypesInstanceView)}) + if err != nil { + return armcompute.VirtualMachine{}, err + } + return resp.VirtualMachine, nil +} + +func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, ip armnetwork.PublicIPAddress, nic armnetwork.Interface) armcompute.VirtualMachine { // Azure requires that either a username/password be set or an SSH key. // @@ -60,17 +71,17 @@ func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, panic(fmt.Sprintf("calling crypto/rand.Int() failed and that shouldn't happen: %v", err)) } password := fmt.Sprintf("%s%s%s", "ABC&", n, "xyz") - osProfile := compute.OSProfile{ - AdminUsername: util.StrToPtr("nest"), // unused - AdminPassword: util.StrToPtr(password), // unused + osProfile := armcompute.OSProfile{ + AdminUsername: to.Ptr("core"), // unused + AdminPassword: to.Ptr(password), // unused ComputerName: &name, } if sshkey != "" { - osProfile.LinuxConfiguration = &compute.LinuxConfiguration{ - SSH: &compute.SSHConfiguration{ - PublicKeys: &[]compute.SSHPublicKey{ + osProfile.LinuxConfiguration = &armcompute.LinuxConfiguration{ + SSH: &armcompute.SSHConfiguration{ + PublicKeys: []*armcompute.SSHPublicKey{ { - Path: util.StrToPtr("/home/nest/.ssh/authorized_keys"), + Path: to.Ptr("/home/core/.ssh/authorized_keys"), KeyData: &sshkey, }, }, @@ -81,49 +92,49 @@ func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, ud := base64.StdEncoding.EncodeToString([]byte(userdata)) osProfile.CustomData = &ud } - var imgRef *compute.ImageReference + var imgRef *armcompute.ImageReference if a.opts.DiskURI != "" { - imgRef = &compute.ImageReference{ + imgRef = &armcompute.ImageReference{ ID: &a.opts.DiskURI, } } else { - imgRef = &compute.ImageReference{ + imgRef = &armcompute.ImageReference{ Publisher: &a.opts.Publisher, Offer: &a.opts.Offer, - Sku: &a.opts.Sku, + SKU: &a.opts.Sku, Version: &a.opts.Version, } } - return compute.VirtualMachine{ + return armcompute.VirtualMachine{ Name: &name, Location: &a.opts.Location, - Tags: &map[string]*string{ - "createdBy": util.StrToPtr("mantle"), + Tags: map[string]*string{ + "createdBy": to.Ptr("mantle"), }, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - HardwareProfile: &compute.HardwareProfile{ - VMSize: compute.VirtualMachineSizeTypes(a.opts.Size), + Properties: &armcompute.VirtualMachineProperties{ + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypes(a.opts.Size)), }, - StorageProfile: 
&compute.StorageProfile{ + StorageProfile: &armcompute.StorageProfile{ ImageReference: imgRef, - OsDisk: &compute.OSDisk{ - CreateOption: compute.FromImage, + OSDisk: &armcompute.OSDisk{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), }, }, - OsProfile: &osProfile, - NetworkProfile: &compute.NetworkProfile{ - NetworkInterfaces: &[]compute.NetworkInterfaceReference{ + OSProfile: &osProfile, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { ID: nic.ID, - NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{ - Primary: util.BoolToPtr(true), + Properties: &armcompute.NetworkInterfaceReferenceProperties{ + Primary: to.Ptr(true), }, }, }, }, - DiagnosticsProfile: &compute.DiagnosticsProfile{ - BootDiagnostics: &compute.BootDiagnostics{ - Enabled: util.BoolToPtr(true), + DiagnosticsProfile: &armcompute.DiagnosticsProfile{ + BootDiagnostics: &armcompute.BootDiagnostics{ + Enabled: to.Ptr(true), StorageURI: &storageAccountURI, }, }, @@ -155,22 +166,25 @@ func (a *API) CreateInstance(name, userdata, sshkey, resourceGroup, storageAccou vmParams := a.getVMParameters(name, userdata, sshkey, fmt.Sprintf("https://%s.blob.core.windows.net/", storageAccount), ip, nic) - cancel := make(chan struct{}) - time.AfterFunc(8*time.Minute, func() { - close(cancel) - }) - _, err = a.compClient.CreateOrUpdate(resourceGroup, name, vmParams, cancel) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + poller, err := a.compClient.BeginCreateOrUpdate(ctx, resourceGroup, name, vmParams, nil) if err != nil { return nil, fmt.Errorf("creating instance failed: %w", err) } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return nil, fmt.Errorf("waiting on instance creation failed: %w", err) + } err = util.WaitUntilReady(5*time.Minute, 10*time.Second, func() (bool, error) { - vm, err := a.compClient.Get(resourceGroup, name, "") + resp, err := a.compClient.Get(context.Background(), resourceGroup, name, &armcompute.VirtualMachinesClientGetOptions{Expand: nil}) if err != nil { return false, err } - if vm.VirtualMachineProperties.ProvisioningState != nil && *vm.VirtualMachineProperties.ProvisioningState != "Succeeded" { + state := resp.VirtualMachine.Properties.ProvisioningState + if state != nil && *state != "Succeeded" { return false, nil } @@ -183,7 +197,7 @@ func (a *API) CreateInstance(name, userdata, sshkey, resourceGroup, storageAccou return nil, fmt.Errorf("waiting for machine to become active: %v", err) } - vm, err := a.compClient.Get(resourceGroup, name, "") + vm, err := a.getInstance(name, resourceGroup) if err != nil { return nil, err } @@ -207,7 +221,12 @@ func (a *API) CreateInstance(name, userdata, sshkey, resourceGroup, storageAccou } func (a *API) TerminateInstance(name, resourceGroup string) error { - _, err := a.compClient.Delete(resourceGroup, name, nil) + ctx := context.Background() + poller, err := a.compClient.BeginDelete(ctx, resourceGroup, name, &armcompute.VirtualMachinesClientBeginDeleteOptions{ForceDeletion: to.Ptr(true)}) + if err != nil { + return err + } + _, err = poller.PollUntilDone(ctx, nil) return err } @@ -220,21 +239,21 @@ func (a *API) GetConsoleOutput(name, resourceGroup, storageAccount string) ([]by if kr.Keys == nil { return nil, fmt.Errorf("no storage service keys found") } - k := *kr.Keys - key := *k[0].Value + k := kr.Keys + key := k[0].Value - vm, err := a.compClient.Get(resourceGroup, name, compute.InstanceView) + vm, err := 
a.getInstance(name, resourceGroup) if err != nil { return nil, err } - consoleURI := vm.VirtualMachineProperties.InstanceView.BootDiagnostics.SerialConsoleLogBlobURI + consoleURI := vm.Properties.InstanceView.BootDiagnostics.SerialConsoleLogBlobURI if consoleURI == nil { return nil, fmt.Errorf("serial console URI is nil") } // Only the full URI to the logs are present in the virtual machine - // properties. Parse out the container & file name to use the GetBlob + // properties. Parse out the container & file name to use the GetBlockBlob // API call directly. uri := []byte(*consoleURI) containerPat := regexp.MustCompile(`bootdiagnostics-kola[a-z0-9\-]+`) @@ -244,7 +263,7 @@ func (a *API) GetConsoleOutput(name, resourceGroup, storageAccount string) ([]by var data io.ReadCloser err = util.Retry(6, 10*time.Second, func() error { - data, err = a.GetBlob(storageAccount, key, container, blobname) + data, err = a.GetBlockBlob(storageAccount, *key, container, blobname) return err }) if err != nil { diff --git a/mantle/platform/api/azure/network.go b/mantle/platform/api/azure/network.go index 26dc6c08..872b48e5 100644 --- a/mantle/platform/api/azure/network.go +++ b/mantle/platform/api/azure/network.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,85 +16,99 @@ package azure import ( + "context" "fmt" - "github.com/Azure/azure-sdk-for-go/arm/network" - - "github.com/coreos/coreos-assembler/mantle/util" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" ) var ( - virtualNetworkPrefix = []string{"10.0.0.0/16"} + virtualNetworkPrefix = "10.0.0.0/16" subnetPrefix = "10.0.0.0/24" ) -func (a *API) PrepareNetworkResources(resourceGroup string) (network.Subnet, error) { +func (a *API) PrepareNetworkResources(resourceGroup string) (armnetwork.Subnet, error) { if err := a.createVirtualNetwork(resourceGroup); err != nil { - return network.Subnet{}, err + return armnetwork.Subnet{}, err } return a.createSubnet(resourceGroup) } func (a *API) createVirtualNetwork(resourceGroup string) error { - _, err := a.netClient.CreateOrUpdate(resourceGroup, "kola-vn", network.VirtualNetwork{ - Location: &a.opts.Location, - VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{ - AddressSpace: &network.AddressSpace{ - AddressPrefixes: &virtualNetworkPrefix, + ctx := context.Background() + poller, err := a.netClient.BeginCreateOrUpdate(ctx, resourceGroup, "kola-vn", armnetwork.VirtualNetwork{ + Location: to.Ptr(a.opts.Location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{to.Ptr(virtualNetworkPrefix)}, }, }, }, nil) - + if err != nil { + return err + } + _, err = poller.PollUntilDone(ctx, nil) return err } -func (a *API) createSubnet(resourceGroup string) (network.Subnet, error) { - _, err := a.subClient.CreateOrUpdate(resourceGroup, "kola-vn", "kola-subnet", network.Subnet{ - SubnetPropertiesFormat: &network.SubnetPropertiesFormat{ - AddressPrefix: &subnetPrefix, +func (a *API) createSubnet(resourceGroup string) (armnetwork.Subnet, error) { + ctx := context.Background() + poller, err := a.subClient.BeginCreateOrUpdate(ctx, resourceGroup, "kola-vn", "kola-subnet", armnetwork.Subnet{ + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr(subnetPrefix), }, }, nil) if err != nil { - return network.Subnet{}, err + return 
armnetwork.Subnet{}, err } - - return a.getSubnet(resourceGroup) + resp, err := poller.PollUntilDone(ctx, nil) + if err != nil { + return armnetwork.Subnet{}, err + } + return resp.Subnet, nil } -func (a *API) getSubnet(resourceGroup string) (network.Subnet, error) { - return a.subClient.Get(resourceGroup, "kola-vn", "kola-subnet", "") +func (a *API) getSubnet(resourceGroup string) (armnetwork.Subnet, error) { + resp, err := a.subClient.Get(context.Background(), resourceGroup, "kola-vn", "kola-subnet", &armnetwork.SubnetsClientGetOptions{Expand: nil}) + if err != nil { + return armnetwork.Subnet{}, err + } + return resp.Subnet, nil } -func (a *API) createPublicIP(resourceGroup string) (*network.PublicIPAddress, error) { +func (a *API) createPublicIP(resourceGroup string) (armnetwork.PublicIPAddress, error) { name := randomName("ip") + ctx := context.Background() - _, err := a.ipClient.CreateOrUpdate(resourceGroup, name, network.PublicIPAddress{ - Location: &a.opts.Location, + poller, err := a.ipClient.BeginCreateOrUpdate(ctx, resourceGroup, name, armnetwork.PublicIPAddress{ + Location: to.Ptr(a.opts.Location), }, nil) if err != nil { - return nil, err + return armnetwork.PublicIPAddress{}, err } - ip, err := a.ipClient.Get(resourceGroup, name, "") + resp, err := poller.PollUntilDone(ctx, nil) if err != nil { - return nil, err + return armnetwork.PublicIPAddress{}, err } - return &ip, nil + return resp.PublicIPAddress, nil } func (a *API) GetPublicIP(name, resourceGroup string) (string, error) { - ip, err := a.ipClient.Get(resourceGroup, name, "") + resp, err := a.ipClient.Get(context.Background(), resourceGroup, name, &armnetwork.PublicIPAddressesClientGetOptions{Expand: nil}) if err != nil { return "", err } - if ip.PublicIPAddressPropertiesFormat.IPAddress == nil { + ip := resp.PublicIPAddress + if ip.Properties.IPAddress == nil { return "", fmt.Errorf("IP Address is nil") } - return *ip.PublicIPAddressPropertiesFormat.IPAddress, nil + return *ip.Properties.IPAddress, nil } // returns PublicIP, PrivateIP, error @@ -102,62 +117,62 @@ func (a *API) GetIPAddresses(name, publicIPName, resourceGroup string) (string, if err != nil { return "", "", err } + privateIP, err := a.GetPrivateIP(name, resourceGroup) + if err != nil { + return publicIP, "", err + } + return publicIP, privateIP, nil +} - nic, err := a.intClient.Get(resourceGroup, name, "") +func (a *API) GetPrivateIP(interfaceName, resourceGroup string) (string, error) { + resp, err := a.intClient.Get(context.Background(), resourceGroup, interfaceName, &armnetwork.InterfacesClientGetOptions{Expand: nil}) if err != nil { - return "", "", err + return "", err } + nic := resp.Interface - configs := *nic.InterfacePropertiesFormat.IPConfigurations + configs := nic.Properties.IPConfigurations for _, conf := range configs { - if conf.PrivateIPAddress == nil { - return "", "", fmt.Errorf("PrivateIPAddress is nil") + if conf.Properties.PrivateIPAddress == nil { + return "", fmt.Errorf("PrivateIPAddress is nil") } else { - return publicIP, *conf.PrivateIPAddress, nil + return *conf.Properties.PrivateIPAddress, nil } } - return "", "", fmt.Errorf("no ip configurations found") -} - -func (a *API) GetPrivateIP(name, resourceGroup string) (string, error) { - nic, err := a.intClient.Get(resourceGroup, name, "") - if err != nil { - return "", err - } - - configs := *nic.InterfacePropertiesFormat.IPConfigurations - return *configs[0].PrivateIPAddress, nil + return "", fmt.Errorf("no private configurations found") } -func (a *API) createNIC(ip 
*network.PublicIPAddress, subnet *network.Subnet, resourceGroup string) (*network.Interface, error) { +func (a *API) createNIC(ip armnetwork.PublicIPAddress, subnet *armnetwork.Subnet, resourceGroup string) (armnetwork.Interface, error) { name := randomName("nic") ipconf := randomName("nic-ipconf") + ctx := context.Background() - _, err := a.intClient.CreateOrUpdate(resourceGroup, name, network.Interface{ - Location: &a.opts.Location, - InterfacePropertiesFormat: &network.InterfacePropertiesFormat{ - IPConfigurations: &[]network.InterfaceIPConfiguration{ + poller, err := a.intClient.BeginCreateOrUpdate(ctx, resourceGroup, name, armnetwork.Interface{ + Location: to.Ptr(a.opts.Location), + Properties: &armnetwork.InterfacePropertiesFormat{ + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: &ipconf, - InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{ - PublicIPAddress: ip, - PrivateIPAllocationMethod: network.Dynamic, + Name: to.Ptr(ipconf), + Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PublicIPAddress: to.Ptr(ip), + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), Subnet: subnet, }, }, }, - EnableAcceleratedNetworking: util.BoolToPtr(true), + EnableAcceleratedNetworking: to.Ptr(true), }, }, nil) if err != nil { - return nil, err + return armnetwork.Interface{}, err } - nic, err := a.intClient.Get(resourceGroup, name, "") + resp, err := poller.PollUntilDone(ctx, nil) if err != nil { - return nil, err + return armnetwork.Interface{}, err } + nic := resp.Interface - return &nic, nil + return nic, nil } diff --git a/mantle/platform/api/azure/options.go b/mantle/platform/api/azure/options.go index cc73229e..ea6cd0ee 100644 --- a/mantle/platform/api/azure/options.go +++ b/mantle/platform/api/azure/options.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2016 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +22,7 @@ import ( type Options struct { *platform.Options - AzureProfile string - AzureAuthLocation string + AzureCredentials string AzureSubscription string DiskURI string diff --git a/mantle/platform/api/azure/storage.go b/mantle/platform/api/azure/storage.go index 2e8db1c7..2e6336b8 100644 --- a/mantle/platform/api/azure/storage.go +++ b/mantle/platform/api/azure/storage.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2016 CoreOS, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,29 +16,178 @@ package azure import ( + "context" "fmt" + "io" + "os" "strings" - "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + + "github.com/frostschutz/go-fibmap" ) -func (a *API) GetStorageServiceKeys(account, resourceGroup string) (storage.AccountListKeysResult, error) { - return a.accClient.ListKeys(resourceGroup, account) +func (a *API) GetStorageServiceKeys(account, resourceGroup string) (armstorage.AccountListKeysResult, error) { + resp, err := a.accClient.ListKeys(context.Background(), resourceGroup, account, &armstorage.AccountsClientListKeysOptions{Expand: nil}) + if err != nil { + return armstorage.AccountListKeysResult{}, err + } + return resp.AccountListKeysResult, nil } func (a *API) CreateStorageAccount(resourceGroup string) (string, error) { // Only lower-case letters & numbers allowed in storage account names name := strings.Replace(randomName("kolasa"), "-", "", -1) - parameters := storage.AccountCreateParameters{ - Sku: &storage.Sku{ - Name: "Standard_LRS", + parameters := armstorage.AccountCreateParameters{ + SKU: &armstorage.SKU{ + Name: to.Ptr(armstorage.SKUNameStandardLRS), }, - Kind: "Storage", - Location: &a.opts.Location, + Kind: to.Ptr(armstorage.KindStorage), + Location: to.Ptr(a.opts.Location), } - _, err := a.accClient.Create(resourceGroup, name, parameters, nil) + ctx := context.Background() + poller, err := a.accClient.BeginCreate(ctx, resourceGroup, name, parameters, nil) if err != nil { return "", fmt.Errorf("creating storage account: %v", err) } - return name, nil + _, err = poller.PollUntilDone(ctx, nil) + return name, err +} + +func (a *API) GetBlockBlob(storageaccount, key, container, name string) (io.ReadCloser, error) { + client, err := getBlockBlobClient(storageaccount, key) + if err != nil { + return nil, err + } + + resp, err := client.DownloadStream(context.Background(), container, name, nil) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +func (a *API) PageBlobExists(storageaccount, key, container, blobname string) (bool, error) { + client, err := getPageBlobClient(storageaccount, key, container, blobname) + if err != nil { + return false, err + } + // Use GetProperties here since there isn't a better way to detect + // if a page blob exists. 
+ _, err = client.GetProperties(context.Background(), nil) + if err != nil { + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return false, nil + } else { + return false, err + } + } + return true, nil +} + +func (a *API) UploadPageBlob(storageaccount, key, file, container, blobname string) error { + client, err := getPageBlobClient(storageaccount, key, container, blobname) + if err != nil { + return err + } + // Open the file and get the size in bytes + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return err + } + size := fi.Size() + + // Create the page blob + ctx := context.Background() + _, err = client.Create(ctx, size, nil) + if err != nil { + return err + } + + // Find the data (non-zero) ranges in the file and then chunk up + // those data ranges so they are in 4MiB segments which is the + // maxiumum that can be uploaded in one call to UploadPages(). + dataRanges := fibmap.NewFibmapFile(f).SeekDataHole() + var chunkedDataRanges []int64 + dataSize, fourMB := int64(0), int64(4*1024*1024) + for i := 0; i < len(dataRanges); i += 2 { + offset, count := dataRanges[i], dataRanges[i+1] + end := offset + count + dataSize += count + for offset < end { + chunk := fourMB + if (end - offset) < fourMB { + chunk = end - offset + } + chunkedDataRanges = append(chunkedDataRanges, offset, chunk) + offset += chunk + } + } + fmt.Printf("\nEffective upload size: %d MiB (from %d MiB originally)\n", dataSize/1024/1024, size/1024/1024) + + // Upload the data using UploadPages() and show progress. Use a SectionReader + // to give the UploadPages a specific window of data to operate on. Use + // streaming.NopCloser to allow passing in a Reader with no Close() implementation. + uploaded := int64(0) + for i := 0; i < len(chunkedDataRanges); i += 2 { + offset, count := chunkedDataRanges[i], chunkedDataRanges[i+1] + sr := io.NewSectionReader(f, offset, count) + _, err = client.UploadPages(ctx, streaming.NopCloser(sr), blob.HTTPRange{ + Offset: offset, + Count: count, + }, nil) + if err != nil { + return err + } + uploaded += count + fmt.Printf("\033[2K\rProgress: %v%%", uploaded*100/dataSize) + } + return nil +} + +func (a *API) DeletePageBlob(storageaccount, key, container, blobname string) error { + client, err := getPageBlobClient(storageaccount, key, container, blobname) + if err != nil { + return err + } + _, err = client.Delete(context.Background(), nil) + return err +} + +func (a *API) DeleteBlockBlob(storageaccount, key, container, blob string) error { + client, err := getBlockBlobClient(storageaccount, key) + if err != nil { + return err + } + _, err = client.DeleteBlob(context.Background(), container, blob, nil) + return err +} + +func getBlockBlobClient(storageaccount, key string) (*azblob.Client, error) { + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", storageaccount) + cred, err := azblob.NewSharedKeyCredential(storageaccount, key) + if err != nil { + return nil, err + } + return azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) +} + +func getPageBlobClient(storageaccount, key, container, blobname string) (*pageblob.Client, error) { + pageBlobURL := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", storageaccount, container, blobname) + cred, err := azblob.NewSharedKeyCredential(storageaccount, key) + if err != nil { + return nil, err + } + return pageblob.NewClientWithSharedKeyCredential(pageBlobURL, cred, nil) } diff --git a/mantle/platform/api/azure/storage_mit.go 
b/mantle/platform/api/azure/storage_mit.go deleted file mode 100644 index d58e7a74..00000000 --- a/mantle/platform/api/azure/storage_mit.go +++ /dev/null @@ -1,260 +0,0 @@ -// Azure VHD Utilities for Go -// Copyright (c) Microsoft Corporation -// -// All rights reserved. -// -// MIT License -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -// -// derived from https://github.com/Microsoft/azure-vhd-utils/blob/8fcb4e03cb4c0f928aa835c21708182dbb23fc83/vhdUploadCmdHandler.go - -package azure - -import ( - "fmt" - "io" - - "github.com/Azure/azure-sdk-for-go/storage" - "github.com/Microsoft/azure-vhd-utils/upload" - "github.com/Microsoft/azure-vhd-utils/upload/metadata" - "github.com/Microsoft/azure-vhd-utils/vhdcore/common" - "github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream" - "github.com/coreos/pkg/multierror" -) - -const pageBlobPageSize int64 = 2 * 1024 * 1024 - -type BlobExistsError string - -func (be BlobExistsError) Error() string { - return fmt.Sprintf("blob %q already exists", string(be)) -} - -func (a *API) BlobExists(storageaccount, storagekey, container, blob string) (bool, error) { - sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return false, err - } - - bsc := sc.GetBlobService() - - return bsc.BlobExists(container, blob) -} - -func (a *API) GetBlob(storageaccount, storagekey, container, name string) (io.ReadCloser, error) { - sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return nil, err - } - - bsc := sc.GetBlobService() - if _, err = bsc.CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate); err != nil { - return nil, err - } - - return bsc.GetBlob(container, name) -} - -// DeleteBlob deletes the given blob specified by the given storage account, -// container, and blob name. 
-func (a *API) DeleteBlob(storageaccount, storagekey, container, blob string) error { - sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return err - } - - bsc := sc.GetBlobService() - if _, err = bsc.CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate); err != nil { - return err - } - - err = bsc.DeleteBlob(container, blob, nil) - if err != nil { - return err - } - - return nil -} - -// UploadBlob uploads vhd to the given storage account, container, and blob name. -// -// It returns BlobExistsError if the blob exists and overwrite is not true. -func (a *API) UploadBlob(storageaccount, storagekey, vhd, container, blob string, overwrite bool) error { - ds, err := diskstream.CreateNewDiskStream(vhd) - if err != nil { - return err - } - defer ds.Close() - - sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return err - } - - bsc := sc.GetBlobService() - if _, err = bsc.CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate); err != nil { - return err - } - - blobExists, err := bsc.BlobExists(container, blob) - if err != nil { - return err - } - - resume := false - var blobMetaData *metadata.MetaData - if blobExists { - if !overwrite { - bm, err := getBlobMetaData(bsc, container, blob) - if err != nil { - return err - } - blobMetaData = bm - resume = true - plog.Printf("Blob with name '%s' already exists, checking if upload can be resumed", blob) - } - } - - localMetaData, err := getLocalVHDMetaData(vhd) - if err != nil { - return err - } - var rangesToSkip []*common.IndexRange - if resume { - if errs := metadata.CompareMetaData(blobMetaData, localMetaData); len(errs) != 0 { - return multierror.Error(errs) - } - ranges, err := getAlreadyUploadedBlobRanges(bsc, container, blob) - if err != nil { - return err - } - rangesToSkip = ranges - } else { - if err := createBlob(bsc, container, blob, ds.GetSize(), localMetaData); err != nil { - return err - } - } - - uploadableRanges, err := upload.LocateUploadableRanges(ds, rangesToSkip, pageBlobPageSize) - if err != nil { - return err - } - - uploadableRanges, err = upload.DetectEmptyRanges(ds, uploadableRanges) - if err != nil { - return err - } - - cxt := &upload.DiskUploadContext{ - VhdStream: ds, - UploadableRanges: uploadableRanges, - AlreadyProcessedBytes: common.TotalRangeLength(rangesToSkip), - BlobServiceClient: bsc, - ContainerName: container, - BlobName: blob, - Parallelism: 8, - Resume: resume, - MD5Hash: localMetaData.FileMetaData.MD5Hash, - } - - return upload.Upload(cxt) -} - -// getBlobMetaData returns the custom metadata associated with a page blob which is set by createBlob method. -// The parameter client is the Azure blob service client, parameter containerName is the name of an existing container -// in which the page blob resides, parameter blobName is name for the page blob -// This method attempt to fetch the metadata only if MD5Hash is not set for the page blob, this method panic if the -// MD5Hash is already set or if the custom metadata is absent. 
-// -func getBlobMetaData(client storage.BlobStorageClient, containerName, blobName string) (*metadata.MetaData, error) { - md5Hash, err := getBlobMD5Hash(client, containerName, blobName) - if md5Hash != "" { - return nil, BlobExistsError(blobName) - } - if err != nil { - return nil, err - } - - blobMetaData, err := metadata.NewMetadataFromBlob(client, containerName, blobName) - if err != nil { - return nil, err - } - - if blobMetaData == nil { - return nil, fmt.Errorf("There is no upload metadata associated with the existing blob '%s', so upload operation cannot be resumed, use --overwrite option.", blobName) - } - - return blobMetaData, nil -} - -// getLocalVHDMetaData returns the metadata of a local VHD -// -func getLocalVHDMetaData(localVHDPath string) (*metadata.MetaData, error) { - localMetaData, err := metadata.NewMetaDataFromLocalVHD(localVHDPath) - if err != nil { - return nil, err - } - return localMetaData, nil -} - -// createBlob creates a page blob of specific size and sets custom metadata -// The parameter client is the Azure blob service client, parameter containerName is the name of an existing container -// in which the page blob needs to be created, parameter blobName is name for the new page blob, size is the size of -// the new page blob in bytes and parameter vhdMetaData is the custom metadata to be associacted with the page blob -// -func createBlob(client storage.BlobStorageClient, containerName, blobName string, size int64, vhdMetaData *metadata.MetaData) error { - if err := client.PutPageBlob(containerName, blobName, size, nil); err != nil { - return err - } - m, _ := vhdMetaData.ToMap() - if err := client.SetBlobMetadata(containerName, blobName, m, make(map[string]string)); err != nil { - return err - } - - return nil -} - -// getAlreadyUploadedBlobRanges returns the range slice containing ranges of a page blob those are already uploaded. 
-// The parameter client is the Azure blob service client, parameter containerName is the name of an existing container -// in which the page blob resides, parameter blobName is name for the page blob -// -func getAlreadyUploadedBlobRanges(client storage.BlobStorageClient, containerName, blobName string) ([]*common.IndexRange, error) { - existingRanges, err := client.GetPageRanges(containerName, blobName) - if err != nil { - return nil, err - } - var rangesToSkip = make([]*common.IndexRange, len(existingRanges.PageList)) - for i, r := range existingRanges.PageList { - rangesToSkip[i] = common.NewIndexRange(r.Start, r.End) - } - return rangesToSkip, nil -} - -// getBlobMD5Hash returns the MD5Hash associated with a blob -// The parameter client is the Azure blob service client, parameter containerName is the name of an existing container -// in which the page blob resides, parameter blobName is name for the page blob -// -func getBlobMD5Hash(client storage.BlobStorageClient, containerName, blobName string) (string, error) { - properties, err := client.GetBlobProperties(containerName, blobName) - if err != nil { - return "", err - } - return properties.ContentMD5, nil -} diff --git a/mantle/platform/api/gcloud/api.go b/mantle/platform/api/gcloud/api.go index 3b210ad5..134b8149 100644 --- a/mantle/platform/api/gcloud/api.go +++ b/mantle/platform/api/gcloud/api.go @@ -33,15 +33,16 @@ var ( ) type Options struct { - Image string - Project string - Zone string - MachineType string - DiskType string - Network string - ServiceAcct string - JSONKeyFile string - ServiceAuth bool + Image string + Project string + Zone string + MachineType string + DiskType string + Network string + ServiceAcct string + JSONKeyFile string + ServiceAuth bool + Confidential bool *platform.Options } diff --git a/mantle/platform/api/gcloud/compute.go b/mantle/platform/api/gcloud/compute.go index 88416501..68a3b5b7 100644 --- a/mantle/platform/api/gcloud/compute.go +++ b/mantle/platform/api/gcloud/compute.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "github.com/coreos/coreos-assembler/mantle/platform" "github.com/coreos/coreos-assembler/mantle/util" "golang.org/x/crypto/ssh/agent" "google.golang.org/api/compute/v1" @@ -33,8 +34,42 @@ func (a *API) vmname() string { return fmt.Sprintf("%s-%x", a.options.BaseName, b) } +// ["5G:channel=nvme"], by default the disk type is local-ssd +func ParseDisk(spec string, zone string) (*compute.AttachedDisk, error) { + var diskInterface string + + size, diskmap, err := util.ParseDiskSpec(spec) + if err != nil { + return nil, fmt.Errorf("failed to parse disk spec %q: %w", spec, err) + } + for key, value := range diskmap { + switch key { + case "channel": + switch value { + case "nvme", "scsi": + diskInterface = strings.ToUpper(value) + default: + return nil, fmt.Errorf("invalid channel value: %q", value) + } + default: + return nil, fmt.Errorf("invalid key %q", key) + } + } + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: false, + Type: "SCRATCH", + Interface: diskInterface, + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskType: "/zones/" + zone + "/diskTypes/local-ssd", + DiskSizeGb: size, + }, + }, nil +} + // Taken from: https://github.com/golang/build/blob/master/buildlet/gce.go -func (a *API) mkinstance(userdata, name string, keys []*agent.Key, useServiceAcct bool) *compute.Instance { +func (a *API) mkinstance(userdata, name string, keys []*agent.Key, opts platform.MachineOptions, useServiceAcct bool) (*compute.Instance, error) { mantle := "mantle" 
metadataItems := []*compute.MetadataItems{ { @@ -111,21 +146,40 @@ func (a *API) mkinstance(userdata, name string, keys []*agent.Key, useServiceAcc Value: &userdata, }) } - - return instance - + // create confidential instance + if a.options.Confidential { + instance.ConfidentialInstanceConfig = &compute.ConfidentialInstanceConfig{ + EnableConfidentialCompute: true, + } + instance.Scheduling = &compute.Scheduling{ + OnHostMaintenance: "TERMINATE", + } + } + // attach aditional disk + for _, spec := range opts.AdditionalDisks { + plog.Debugf("Parsing disk spec %q\n", spec) + disk, err := ParseDisk(spec, a.options.Zone) + if err != nil { + return nil, fmt.Errorf("failed to parse spec %q: %w", spec, err) + } + instance.Disks = append(instance.Disks, disk) + } + return instance, nil } // CreateInstance creates a Google Compute Engine instance. -func (a *API) CreateInstance(userdata string, keys []*agent.Key, useServiceAcct bool) (*compute.Instance, error) { +func (a *API) CreateInstance(userdata string, keys []*agent.Key, opts platform.MachineOptions, useServiceAcct bool) (*compute.Instance, error) { name := a.vmname() - inst := a.mkinstance(userdata, name, keys, useServiceAcct) + inst, err := a.mkinstance(userdata, name, keys, opts, useServiceAcct) + if err != nil { + return nil, fmt.Errorf("failed to create instance %q: %w", name, err) + } plog.Debugf("Creating instance %q", name) op, err := a.compute.Instances.Insert(a.options.Project, a.options.Zone, inst).Do() if err != nil { - return nil, fmt.Errorf("failed to request new GCE instance: %v\n", err) + return nil, fmt.Errorf("failed to request new GCP instance: %v\n", err) } doable := a.compute.ZoneOperations.Get(a.options.Project, a.options.Zone, op.Name) diff --git a/mantle/platform/api/gcloud/image.go b/mantle/platform/api/gcloud/image.go index 505316a2..f47edf23 100644 --- a/mantle/platform/api/gcloud/image.go +++ b/mantle/platform/api/gcloud/image.go @@ -16,6 +16,7 @@ package gcloud import ( "fmt" + "runtime" "strings" "golang.org/x/net/context" @@ -32,11 +33,12 @@ const ( ) type ImageSpec struct { - SourceImage string - Family string - Name string - Description string - Licenses []string // short names + Architecture string + SourceImage string + Family string + Name string + Description string + Licenses []string // short names } const endpointPrefix = "https://www.googleapis.com/compute/v1/" @@ -65,7 +67,7 @@ func getImageAPIEndpoint(image, project string) (string, error) { " begin with 'projects/', or use the short name") } -// CreateImage creates an image on GCE and returns operation details and +// CreateImage creates an image on GCP and returns operation details and // a Pending. If overwrite is true, an existing image will be overwritten // if it exists. 
func (a *API) CreateImage(spec *ImageSpec, overwrite bool) (*compute.Operation, *Pending, error) { @@ -78,12 +80,24 @@ func (a *API) CreateImage(spec *ImageSpec, overwrite bool) (*compute.Operation, // If not in URI format then query GCP for that info license, err := a.compute.Licenses.Get(a.options.Project, l).Do() if err != nil { - return nil, nil, fmt.Errorf("Invalid GCE license %s: %v", l, err) + return nil, nil, fmt.Errorf("Invalid GCP license %s: %v", l, err) } licenses[i] = license.SelfLink } } + if spec.Architecture == "" { + spec.Architecture = runtime.GOARCH + } + switch spec.Architecture { + case "amd64", "x86_64": + spec.Architecture = "X86_64" + case "arm64", "aarch64": + spec.Architecture = "ARM64" + default: + return nil, nil, fmt.Errorf("unsupported gcp architecture %q", spec.Architecture) + } + if overwrite { plog.Debugf("Overwriting image %q", spec.Name) // delete existing image, ignore error since it might not exist. @@ -107,12 +121,25 @@ func (a *API) CreateImage(spec *ImageSpec, overwrite bool) (*compute.Operation, { Type: "VIRTIO_SCSI_MULTIQUEUE", }, + // RHEL supports this since 8.4; TODO share logic here with + // https://github.com/osbuild/osbuild-composer/blob/c6570f6c94149b47f2f8e2f82d7467d6b96755bb/internal/cloud/gcp/compute.go#L16 + { + Type: "SEV_CAPABLE", + }, + { + Type: "GVNIC", + }, { Type: "UEFI_COMPATIBLE", }, + // https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential + { + Type: "SEV_SNP_CAPABLE", + }, } image := &compute.Image{ + Architecture: spec.Architecture, Family: spec.Family, Name: spec.Name, Description: spec.Description, @@ -148,7 +175,7 @@ func (a *API) ListImages(ctx context.Context, prefix string, family string) ([]* return nil }) if err != nil { - return nil, fmt.Errorf("Listing GCE images failed: %v", err) + return nil, fmt.Errorf("Listing GCP images failed: %v", err) } return images, nil } diff --git a/mantle/platform/api/ibmcloud/s3.go b/mantle/platform/api/ibmcloud/s3.go index 5dcd0352..ee571556 100644 --- a/mantle/platform/api/ibmcloud/s3.go +++ b/mantle/platform/api/ibmcloud/s3.go @@ -134,7 +134,7 @@ func (a *API) checkIfObjectExists(objectName, bucketName string) bool { return err == nil } -//UploadObject - upload to s3 bucket +// UploadObject - upload to s3 bucket func (a *API) UploadObject(r io.Reader, objectName, bucketName string, force bool) error { // check if image exists and force is not set then bail if !force { diff --git a/mantle/platform/api/openstack/api.go b/mantle/platform/api/openstack/api.go index e9ae1d00..c2b1cfff 100644 --- a/mantle/platform/api/openstack/api.go +++ b/mantle/platform/api/openstack/api.go @@ -328,6 +328,9 @@ func (a *API) CreateServer(name, sshKeyID, userdata string) (*Server, error) { if err != nil { return false, err } + if server.Status == "ERROR" { + return false, fmt.Errorf("Server reported ERROR status: %v", server.Fault) + } return server.Status == "ACTIVE", nil }) if err != nil { diff --git a/mantle/platform/machine/gcloud/cluster.go b/mantle/platform/machine/gcloud/cluster.go index fedee22c..5a3ed5f1 100644 --- a/mantle/platform/machine/gcloud/cluster.go +++ b/mantle/platform/machine/gcloud/cluster.go @@ -37,20 +37,17 @@ func (gc *cluster) NewMachine(userdata *conf.UserData) (platform.Machine, error) } func (gc *cluster) NewMachineWithOptions(userdata *conf.UserData, options platform.MachineOptions) (platform.Machine, error) { - if len(options.AdditionalDisks) > 0 { - return nil, errors.New("platform gce does not yet support additional disks") - } if 
options.MultiPathDisk { - return nil, errors.New("platform gce does not support multipathed disks") + return nil, errors.New("platform gcp does not support multipathed disks") } if options.AdditionalNics > 0 { - return nil, errors.New("platform gce does not support additional nics") + return nil, errors.New("platform gcp does not support additional nics") } if options.AppendKernelArgs != "" { - return nil, errors.New("platform gce does not support appending kernel arguments") + return nil, errors.New("platform gcp does not support appending kernel arguments") } if options.AppendFirstbootKernelArgs != "" { - return nil, errors.New("platform gce does not support appending firstboot kernel arguments") + return nil, errors.New("platform gcp does not support appending firstboot kernel arguments") } conf, err := gc.RenderUserData(userdata, map[string]string{ @@ -69,7 +66,7 @@ func (gc *cluster) NewMachineWithOptions(userdata *conf.UserData, options platfo } } - instance, err := gc.flight.api.CreateInstance(conf.String(), keys, !gc.RuntimeConf().NoInstanceCreds) + instance, err := gc.flight.api.CreateInstance(conf.String(), keys, options, !gc.RuntimeConf().NoInstanceCreds) if err != nil { return nil, err } diff --git a/mantle/platform/machine/qemu/cluster.go b/mantle/platform/machine/qemu/cluster.go index b789d725..5bd2005d 100644 --- a/mantle/platform/machine/qemu/cluster.go +++ b/mantle/platform/machine/qemu/cluster.go @@ -71,17 +71,6 @@ func (qc *Cluster) NewMachineWithQemuOptions(userdata *conf.UserData, options pl } qc.mu.Unlock() - var confPath string - if conf.IsIgnition() { - confPath = filepath.Join(dir, "ignition.json") - if err := conf.WriteFile(confPath); err != nil { - return nil, err - } - } else if conf.IsEmpty() { - } else { - return nil, fmt.Errorf("qemu only supports Ignition or empty configs") - } - journal, err := platform.NewJournal(dir) if err != nil { return nil, err @@ -98,6 +87,24 @@ func (qc *Cluster) NewMachineWithQemuOptions(userdata *conf.UserData, options pl if options.DisablePDeathSig { builder.Pdeathsig = false } + + if qc.flight.opts.SecureExecution { + if err := builder.SetSecureExecution(qc.flight.opts.SecureExecutionIgnitionPubKey, qc.flight.opts.SecureExecutionHostKey, conf); err != nil { + return nil, err + } + } + + var confPath string + if conf.IsIgnition() { + confPath = filepath.Join(dir, "ignition.json") + if err := conf.WriteFile(confPath); err != nil { + return nil, err + } + } else if conf.IsEmpty() { + } else { + return nil, fmt.Errorf("qemu only supports Ignition or empty configs") + } + builder.ConfigFile = confPath defer builder.Close() builder.UUID = qm.id @@ -121,6 +128,8 @@ func (qc *Cluster) NewMachineWithQemuOptions(userdata *conf.UserData, options pl builder.Memory = int(memory) } else if options.MinMemory != 0 { builder.Memory = options.MinMemory + } else if qc.flight.opts.SecureExecution { + builder.MemoryMiB = 4096 // SE needs at least 4GB } channel := "virtio" diff --git a/mantle/platform/machine/qemu/flight.go b/mantle/platform/machine/qemu/flight.go index 5da7f271..188fd599 100644 --- a/mantle/platform/machine/qemu/flight.go +++ b/mantle/platform/machine/qemu/flight.go @@ -31,7 +31,9 @@ type Options struct { DiskImage string // DiskSize if non-empty will expand the disk DiskSize string - Board string + // DriveOpts is arbitrary comma-separated list of options + DriveOpts string + // Firmware will be passed to qemu Firmware string Memory string Arch string @@ -44,6 +46,11 @@ type Options struct { //Option to create a temporary software 
TPM - true by default Swtpm bool + //IBM Secure Execution + SecureExecution bool + SecureExecutionIgnitionPubKey string + SecureExecutionHostKey string + *platform.Options } diff --git a/mantle/platform/qemu.go b/mantle/platform/qemu.go index 1c1218b8..d9c003d3 100644 --- a/mantle/platform/qemu.go +++ b/mantle/platform/qemu.go @@ -90,7 +90,8 @@ type Disk struct { BackingFile string // raw disk image to use. BackingFormat string // qcow2, raw, etc. If unspecified will be autodetected. Channel string // virtio (default), nvme - DeviceOpts []string // extra options to pass to qemu. "serial=XXXX" makes disks show up as /dev/disk/by-id/virtio- + DeviceOpts []string // extra options to pass to qemu -device. "serial=XXXX" makes disks show up as /dev/disk/by-id/virtio- + DriveOpts []string // extra options to pass to -drive SectorSize int // if not 0, override disk sector size NbdDisk bool // if true, the disks should be presented over nbd:unix socket MultiPathDisk bool // if true, present multiple paths @@ -100,28 +101,38 @@ type Disk struct { nbdServCmd exec.Cmd // command to serve the disk } -// ParseDiskSpec converts a disk specification into a Disk. The format is: -// [:,,...]. -func ParseDiskSpec(spec string) (*Disk, error) { - split := strings.Split(spec, ":") - var size string +func ParseDisk(spec string) (*Disk, error) { + var channel string + sectorSize := 0 + serialOpt := []string{} multipathed := false - if len(split) == 1 { - size = split[0] - } else if len(split) == 2 { - size = split[0] - for _, opt := range strings.Split(split[1], ",") { - if opt == "mpath" { - multipathed = true - } else { - return nil, fmt.Errorf("unknown disk option %s", opt) - } + + size, diskmap, err := util.ParseDiskSpec(spec) + if err != nil { + return nil, fmt.Errorf("failed to parse disk spec %q: %w", spec, err) + } + + for key, value := range diskmap { + switch key { + case "channel": + channel = value + case "4k": + sectorSize = 4096 + case "mpath": + multipathed = true + case "serial": + value = "serial=" + value + serialOpt = append(serialOpt, value) + default: + return nil, fmt.Errorf("invalid key %q", key) } - } else { - return nil, fmt.Errorf("invalid disk spec %s", spec) } + return &Disk{ - Size: size, + Size: fmt.Sprintf("%dG", size), + Channel: channel, + DeviceOpts: serialOpt, + SectorSize: sectorSize, MultiPathDisk: multipathed, }, nil } @@ -311,7 +322,7 @@ func (inst *QemuInstance) SwitchBootOrder() (err2 error) { } var bootdev, primarydev, secondarydev string - // Get bootdevice for pxe boot + // Get boot device for PXE boots for _, dev := range devs.Return { switch dev.Type { case "child", "child": @@ -320,7 +331,7 @@ func (inst *QemuInstance) SwitchBootOrder() (err2 error) { break } } - // Get boot device (for iso-installs) and block device + // Get boot device for ISO boots and target block device for _, dev := range blkdevs.Return { devpath := filepath.Clean(strings.TrimSuffix(dev.DevicePath, "virtio-backend")) switch dev.Device { @@ -330,11 +341,28 @@ func (inst *QemuInstance) SwitchBootOrder() (err2 error) { primarydev = devpath case "mpath11": secondarydev = devpath + case "": + if dev.Inserted.NodeName == "installiso" { + bootdev = devpath + } default: break } } + if bootdev == "" { + return fmt.Errorf("Could not find boot device using QMP.\n"+ + "Full list of peripherals: %v.\n"+ + "Full list of block devices: %v.\n", + devs.Return, blkdevs.Return) + } + + if primarydev == "" { + return fmt.Errorf("Could not find target disk using QMP.\n"+ + "Full list of block devices: %v.\n", + 
blkdevs.Return) + } + // unset bootindex for the boot device if err := inst.setBootIndexForDevice(bootdev, -1); err != nil { return errors.Wrapf(err, "Could not set bootindex for bootdev") @@ -453,6 +481,10 @@ type QemuBuilder struct { virtioSerialID uint // fds is file descriptors we own to pass to qemu fds []*os.File + + // IBM Secure Execution + secureExecution bool + ignitionPubKey string } // NewQemuBuilder creates a new build for QEMU with default settings. @@ -467,10 +499,10 @@ func NewQemuBuilder() *QemuBuilder { defaultFirmware = "" } ret := QemuBuilder{ - Firmware: defaultFirmware, - Swtpm: true, - Pdeathsig: true, - Argv: []string{}, + Firmware: defaultFirmware, + Swtpm: true, + Pdeathsig: true, + Argv: []string{}, architecture: coreosarch.CurrentRpmArch(), } return &ret @@ -619,6 +651,56 @@ func (builder *QemuBuilder) SetArchitecture(arch string) error { return fmt.Errorf("architecture %s not supported by coreos-assembler qemu", arch) } +// SetSecureExecution enables qemu confidential guest support and adds hostkey to ignition config. +func (builder *QemuBuilder) SetSecureExecution(gpgkey string, hostkey string, config *conf.Conf) error { + if supports, err := builder.supportsSecureExecution(); err != nil { + return err + } else if !supports { + return fmt.Errorf("Secure Execution was requested but isn't supported/enabled") + } + if gpgkey == "" { + return fmt.Errorf("Secure Execution was requested, but we don't have a GPG Public Key to encrypt the config") + } + + if config != nil { + if hostkey == "" { + // dummy hostkey; this is good enough at least for the first boot (to prevent genprotimg from failing) + dummy, err := builder.TempFile("hostkey.*") + if err != nil { + return fmt.Errorf("creating hostkey: %v", err) + } + c := exec.Command("openssl", "req", "-x509", "-sha512", "-nodes", "-days", "1", "-subj", "/C=US/O=IBM/CN=secex", + "-newkey", "ec", "-pkeyopt", "ec_paramgen_curve:secp521r1", "-out", dummy.Name()) + if err := c.Run(); err != nil { + return fmt.Errorf("generating hostkey: %v", err) + } + hostkey = dummy.Name() + } + if contents, err := os.ReadFile(hostkey); err != nil { + return fmt.Errorf("reading hostkey: %v", err) + } else { + config.AddFile("/etc/se-hostkeys/ibm-z-hostkey-1", string(contents), 0644) + } + } + builder.secureExecution = true + builder.ignitionPubKey = gpgkey + builder.Append("-object", "s390-pv-guest,id=pv0", "-machine", "confidential-guest-support=pv0") + return nil +} + +func (builder *QemuBuilder) encryptIgnitionConfig() error { + crypted, err := builder.TempFile("ignition_crypted.*") + if err != nil { + return fmt.Errorf("creating crypted config: %v", err) + } + c := exec.Command("gpg", "--recipient-file", builder.ignitionPubKey, "--yes", "--output", crypted.Name(), "--armor", "--encrypt", builder.ConfigFile) + if err := c.Run(); err != nil { + return fmt.Errorf("encrypting %s: %v", crypted.Name(), err) + } + builder.ConfigFile = crypted.Name() + return nil +} + // Mount9p sets up a mount point from the host to guest. To be replaced // with https://virtio-fs.gitlab.io/ once it lands everywhere. 
func (builder *QemuBuilder) Mount9p(source, destHint string, readonly bool) { @@ -641,6 +723,25 @@ func (builder *QemuBuilder) supportsFwCfg() bool { return true } +// supportsSecureExecution if s390x host (zKVM/LPAR) has "Secure Execution for Linux" feature enabled +func (builder *QemuBuilder) supportsSecureExecution() (bool, error) { + if builder.architecture != "s390x" { + return false, nil + } + content, err := os.ReadFile("/sys/firmware/uv/prot_virt_host") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, fmt.Errorf("reading protvirt flag: %v", err) + } + if len(content) < 1 { + return false, nil + } + enabled := content[0] == '1' + return enabled, nil +} + // supportsSwtpm if the target system supports a virtual TPM device func (builder *QemuBuilder) supportsSwtpm() bool { switch builder.architecture { @@ -738,12 +839,20 @@ func newGuestfish(arch, diskImagePath string, diskSectorSize int) (*coreosGuestf return nil, errors.Wrapf(err, "guestfish launch failed") } + rootfs, err := findLabel("root", pid) + if err != nil { + return nil, errors.Wrapf(err, "guestfish command failed to find root label") + } + if err := exec.Command("guestfish", remote, "mount", rootfs, "/").Run(); err != nil { + return nil, errors.Wrapf(err, "guestfish root mount failed") + } + bootfs, err := findLabel("boot", pid) if err != nil { return nil, errors.Wrapf(err, "guestfish command failed to find boot label") } - if err := exec.Command("guestfish", remote, "mount", bootfs, "/").Run(); err != nil { + if err := exec.Command("guestfish", remote, "mount", bootfs, "/boot").Run(); err != nil { return nil, errors.Wrapf(err, "guestfish boot mount failed") } @@ -768,25 +877,29 @@ func setupPreboot(arch, confPath, firstbootkargs, kargs string, diskImagePath st defer gf.destroy() if confPath != "" { - if err := exec.Command("guestfish", gf.remote, "mkdir-p", "/ignition").Run(); err != nil { + if err := exec.Command("guestfish", gf.remote, "mkdir-p", "/boot/ignition").Run(); err != nil { return errors.Wrapf(err, "guestfish directory creation failed") } - if err := exec.Command("guestfish", gf.remote, "upload", confPath, fileRemoteLocation).Run(); err != nil { + if err := exec.Command("guestfish", gf.remote, "upload", confPath, "/boot"+fileRemoteLocation).Run(); err != nil { return errors.Wrapf(err, "guestfish upload failed") } } // See /boot/grub2/grub.cfg if firstbootkargs != "" { - grubStr := fmt.Sprintf("set ignition_network_kcmdline='%s'\n", firstbootkargs) - if err := exec.Command("guestfish", gf.remote, "write", "/ignition.firstboot", grubStr).Run(); err != nil { + grubStr := fmt.Sprintf("set ignition_network_kcmdline=\"%s\"\n", firstbootkargs) + if err := exec.Command("guestfish", gf.remote, "write", "/boot/ignition.firstboot", grubStr).Run(); err != nil { return errors.Wrapf(err, "guestfish write") } } - - if kargs != "" { - confpathout, err := exec.Command("guestfish", gf.remote, "glob-expand", "/loader/entries/ostree*conf").Output() + // Parsing BLS + var linux string + var initrd string + var allkargs string + zipl_sync := arch == "s390x" && (firstbootkargs != "" || kargs != "") + if kargs != "" || zipl_sync { + confpathout, err := exec.Command("guestfish", gf.remote, "glob-expand", "/boot/loader/entries/ostree*conf").Output() if err != nil { return errors.Wrapf(err, "finding bootloader config path") } @@ -795,7 +908,6 @@ func setupPreboot(arch, confPath, firstbootkargs, kargs string, diskImagePath st return fmt.Errorf("Multiple values for bootloader config: %v", confpathout) } 
confpath := confs[0] - origconf, err := exec.Command("guestfish", gf.remote, "read-file", confpath).Output() if err != nil { return errors.Wrapf(err, "reading bootloader config") @@ -804,12 +916,27 @@ func setupPreboot(arch, confPath, firstbootkargs, kargs string, diskImagePath st for _, line := range strings.Split(string(origconf), "\n") { if strings.HasPrefix(line, "options ") { line += " " + kargs + allkargs = strings.TrimPrefix(line, "options ") + } else if strings.HasPrefix(line, "linux ") { + linux = "/boot" + strings.TrimPrefix(line, "linux ") + } else if strings.HasPrefix(line, "initrd ") { + initrd = "/boot" + strings.TrimPrefix(line, "initrd ") } buf.Write([]byte(line)) buf.Write([]byte("\n")) } - if err := exec.Command("guestfish", gf.remote, "write", confpath, buf.String()).Run(); err != nil { - return errors.Wrapf(err, "writing bootloader config") + if kargs != "" { + if err := exec.Command("guestfish", gf.remote, "write", confpath, buf.String()).Run(); err != nil { + return errors.Wrapf(err, "writing bootloader config") + } + } + } + + // s390x requires zipl to update low-level data on block device + if zipl_sync { + allkargs = strings.TrimSpace(allkargs + " ignition.firstboot " + firstbootkargs) + if err := runZipl(gf, linux, initrd, allkargs); err != nil { + return errors.Wrapf(err, "running zipl") } } @@ -819,6 +946,40 @@ func setupPreboot(arch, confPath, firstbootkargs, kargs string, diskImagePath st return nil } +func runZipl(gf *coreosGuestfish, linux string, initrd string, options string) error { + // Detecting ostree commit + deploy, err := exec.Command("guestfish", gf.remote, "glob-expand", "/ostree/deploy/*/deploy/*.0").Output() + if err != nil { + return errors.Wrapf(err, "finding deploy path") + } + sysroot := strings.TrimSpace(string(deploy)) + // Saving cmdline + if err := exec.Command("guestfish", gf.remote, "write", "/boot/zipl.cmdline", options+"\n").Run(); err != nil { + return errors.Wrapf(err, "writing zipl cmdline") + } + // Bind-mounting for chroot + if err := exec.Command("guestfish", gf.remote, "debug", "sh", fmt.Sprintf("'mount -t devtmpfs none /sysroot/%s/dev'", sysroot)).Run(); err != nil { + return errors.Wrapf(err, "bind-mounting devtmpfs") + } + if err := exec.Command("guestfish", gf.remote, "debug", "sh", fmt.Sprintf("'mount -t proc none /sysroot/%s/proc'", sysroot)).Run(); err != nil { + return errors.Wrapf(err, "bind-mounting /proc") + } + if err := exec.Command("guestfish", gf.remote, "debug", "sh", fmt.Sprintf("'mount -o bind /sysroot/boot /sysroot/%s/boot'", sysroot)).Run(); err != nil { + return errors.Wrapf(err, "bind-mounting /boot") + } + // chroot zipl + cmd := exec.Command("guestfish", gf.remote, "debug", "sh", fmt.Sprintf("'chroot /sysroot/%s /sbin/zipl -i %s -r %s -p /boot/zipl.cmdline -t /boot'", sysroot, linux, initrd)) + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return errors.Wrapf(err, "running zipl") + } + // clean-up + if err := exec.Command("guestfish", gf.remote, "rm-f", "/boot/zipl.cmdline").Run(); err != nil { + return errors.Wrapf(err, "writing zipl cmdline") + } + return nil +} + func resolveBackingFile(backingFile string) (string, error) { backingFile, err := filepath.Abs(backingFile) if err != nil { @@ -915,18 +1076,22 @@ func (builder *QemuBuilder) addDiskImpl(disk *Disk, primary bool) error { return err } if primary { - // If the board doesn't support -fw_cfg or we were explicitly - // requested, inject via libguestfs on the primary disk. 
- if err := builder.renderIgnition(); err != nil { - return errors.Wrapf(err, "rendering ignition") - } - requiresInjection := builder.ConfigFile != "" && builder.ForceConfigInjection - if requiresInjection || builder.AppendFirstbootKernelArgs != "" || builder.AppendKernelArgs != "" { - if err := setupPreboot(builder.architecture, builder.ConfigFile, builder.AppendFirstbootKernelArgs, builder.AppendKernelArgs, - disk.dstFileName, disk.SectorSize); err != nil { - return errors.Wrapf(err, "ignition injection with guestfs failed") + // Only try to inject config if it hasn't already been injected somewhere + // else, which can happen when running an ISO install. + if !builder.configInjected { + // If the board doesn't support -fw_cfg or we were explicitly + // requested, inject via libguestfs on the primary disk. + if err := builder.renderIgnition(); err != nil { + return errors.Wrapf(err, "rendering ignition") + } + requiresInjection := builder.ConfigFile != "" && builder.ForceConfigInjection + if requiresInjection || builder.AppendFirstbootKernelArgs != "" || builder.AppendKernelArgs != "" { + if err := setupPreboot(builder.architecture, builder.ConfigFile, builder.AppendFirstbootKernelArgs, builder.AppendKernelArgs, + disk.dstFileName, disk.SectorSize); err != nil { + return errors.Wrapf(err, "ignition injection with guestfs failed") + } + builder.configInjected = true } - builder.configInjected = true } } diskOpts := disk.DeviceOpts @@ -966,6 +1131,9 @@ func (builder *QemuBuilder) addDiskImpl(disk *Disk, primary bool) error { // Avoid file locking detection, and the disks we create // here are always currently ephemeral. defaultDiskOpts := "auto-read-only=off,cache=unsafe" + if len(disk.DriveOpts) > 0 { + defaultDiskOpts += "," + strings.Join(disk.DriveOpts, ",") + } if disk.MultiPathDisk { // Fake a NVME device with a fake WWN. All these attributes are needed in order @@ -1051,7 +1219,7 @@ func (builder *QemuBuilder) AddDisk(disk *Disk) error { // AddDisksFromSpecs adds multiple secondary disks from their specs. func (builder *QemuBuilder) AddDisksFromSpecs(specs []string) error { for _, spec := range specs { - if disk, err := ParseDiskSpec(spec); err != nil { + if disk, err := ParseDisk(spec); err != nil { return errors.Wrapf(err, "parsing additional disk spec '%s'", spec) } else if err = builder.AddDisk(disk); err != nil { return errors.Wrapf(err, "adding additional disk '%s'", spec) @@ -1135,7 +1303,9 @@ func baseQemuArgs(arch string) ([]string, error) { case "ppc64le": ret = []string{ "qemu-system-ppc64", - "-machine", "pseries,kvm-type=HV,vsmt=8,cap-fwnmi=off," + accel, + // kvm-type=HV ensures we use bare metal KVM and not "user mode" + // https://qemu.readthedocs.io/en/latest/system/ppc/pseries.html#switching-between-the-kvm-pr-and-kvm-hv-kernel-module + "-machine", "pseries,kvm-type=HV," + machineArg, } default: return nil, fmt.Errorf("architecture %s not supported for qemu", arch) @@ -1268,6 +1438,7 @@ func (builder *QemuBuilder) setupIso() error { if len(builder.AppendKernelArgs) > 0 { return errors.Wrapf(err, "running `nestos-installer iso kargs modify`; old NestOS ISO?") } + // Only actually emit a warning if we expected it to be supported stderr := stderrb.String() plog.Warningf("running nestos-installer iso kargs modify: %v: %q", err, stderr) plog.Warning("likely targeting an old NestOS ISO; ignoring...") @@ -1301,7 +1472,14 @@ func (builder *QemuBuilder) setupIso() error { // primary disk is selected. 
This allows us to have "boot once" functionality on // both UEFI and BIOS (`-boot once=d` OTOH doesn't work with OVMF). switch coreosarch.CurrentRpmArch() { - case "s390x", "ppc64le", "aarch64": + case "s390x": + if builder.isoAsDisk { + // we could do it, but boot would fail + return errors.New("cannot attach ISO as disk; no hybrid ISO on this arch") + } + builder.Append("-blockdev", "file,node-name=installiso,filename="+builder.iso.path, + "-device", "virtio-scsi", "-device", "scsi-cd,drive=installiso,bootindex=2") + case "ppc64le", "aarch64": if builder.isoAsDisk { // we could do it, but boot would fail return errors.New("cannot attach ISO as disk; no hybrid ISO on this arch") @@ -1473,16 +1651,24 @@ func (builder *QemuBuilder) Exec() (*QemuInstance, error) { argv = append(argv, "-boot", "order=c,strict=on") } } - // Handle Ignition if it wasn't already injected above if builder.ConfigFile != "" && !builder.configInjected { if builder.supportsFwCfg() { builder.Append("-fw_cfg", "name=opt/com.coreos/config,file="+builder.ConfigFile) } else { + serial := "ignition" + if builder.secureExecution { + // SE case: we have to encrypt the config and attach it with 'serial=ignition_crypted' + if err := builder.encryptIgnitionConfig(); err != nil { + return nil, err + } + serial = "ignition_crypted" + } // Alternative to fw_cfg, should be generally usable on all arches, // especially those without fw_cfg support. // See https://github.com/coreos/ignition/pull/905 - builder.Append("-drive", fmt.Sprintf("if=none,id=ignition,format=raw,file=%s,readonly=on", builder.ConfigFile), "-device", "virtio-blk,serial=ignition,drive=ignition") + builder.Append("-drive", fmt.Sprintf("if=none,id=ignition,format=raw,file=%s,readonly=on", builder.ConfigFile), + "-device", fmt.Sprintf("virtio-blk,serial=%s,drive=ignition", serial)) } } diff --git a/mantle/platform/qmp_util.go b/mantle/platform/qmp_util.go index 4703248c..1cea8c8e 100644 --- a/mantle/platform/qmp_util.go +++ b/mantle/platform/qmp_util.go @@ -37,7 +37,8 @@ type QOMBlkDev struct { DevicePath string `json:"qdev"` Removable bool `json:"removable"` Inserted struct { - BackingFileDepth int `json:"backing_file_depth"` + BackingFileDepth int `json:"backing_file_depth"` + NodeName string `json:"node-name"` } `json:"inserted"` } `json:"return"` } diff --git a/mantle/util/common.go b/mantle/util/common.go index d78d7fa2..94acfac6 100644 --- a/mantle/util/common.go +++ b/mantle/util/common.go @@ -19,6 +19,8 @@ import ( "os" "os/exec" "path/filepath" + "strconv" + "strings" "time" "unsafe" @@ -120,3 +122,37 @@ func RunCmdTimeout(timeout time.Duration, cmd string, args ...string) error { return fmt.Errorf("%s timed out after %s", cmd, timeout) } } + +// ParseDiskSpec converts a disk specification into a Disk. 
The format is: +// [:,,...], like ["5G:channel=nvme"] +func ParseDiskSpec(spec string) (int64, map[string]string, error) { + diskmap := map[string]string{} + split := strings.Split(spec, ":") + if split[0] == "" || (!strings.HasSuffix(split[0], "G")) { + return 0, nil, fmt.Errorf("invalid size opt %s", spec) + } + var disksize string + if len(split) == 1 { + disksize = split[0] + } else if len(split) == 2 { + disksize = split[0] + for _, opt := range strings.Split(split[1], ",") { + kvsplit := strings.SplitN(opt, "=", 2) + if len(kvsplit) == 0 { + return 0, nil, fmt.Errorf("invalid empty option found in spec %q", spec) + } else if len(kvsplit) == 1 { + diskmap[opt] = "" + } else { + diskmap[kvsplit[0]] = kvsplit[1] + } + } + } else { + return 0, nil, fmt.Errorf("invalid disk spec %s", spec) + } + disksize = strings.TrimSuffix(disksize, "G") + size, err := strconv.ParseInt(disksize, 10, 32) + if err != nil { + return 0, nil, fmt.Errorf("failed to convert %q to int64: %w", disksize, err) + } + return size, diskmap, nil +} diff --git a/src/libguestfish.sh b/src/libguestfish.sh index a8c42dc5..506ce70c 100755 --- a/src/libguestfish.sh +++ b/src/libguestfish.sh @@ -11,8 +11,12 @@ export LIBGUESTFS_BACKEND=direct arch=$(uname -m) + +# Hack to run with a wrapper on older P8 hardware running RHEL7 if [ "$arch" = "ppc64le" ] ; then - export LIBGUESTFS_HV="/usr/lib/coreos-assembler/libguestfs-ppc64le-wrapper.sh" + if [[ "$(uname -r)" =~ "el7" ]]; then + export LIBGUESTFS_HV="/usr/lib/coreos-assembler/libguestfs-ppc64le-wrapper.sh" + fi fi # http://libguestfs.org/guestfish.1.html#using-remote-control-robustly-from-shell-scripts -- Gitee