diff --git a/mantle/cmd/kola/kola.go b/mantle/cmd/kola/kola.go index eb40c385123a9ecc013bfbdd4513eaa2f0760891..0b0998ae8d4faf15513c779603c0abb23ec9730a 100644 --- a/mantle/cmd/kola/kola.go +++ b/mantle/cmd/kola/kola.go @@ -291,7 +291,7 @@ func writeProps() error { Server string `json:"server"` BaseVMName string `json:"base_vm_name"` } - type GCE struct { + type GCP struct { Image string `json:"image"` MachineType string `json:"type"` } @@ -323,7 +323,7 @@ func writeProps() error { Azure Azure `json:"azure"` DO DO `json:"do"` ESX ESX `json:"esx"` - GCE GCE `json:"gce"` + GCP GCP `json:"gcp"` OpenStack OpenStack `json:"openstack"` Packet Packet `json:"packet"` QEMU QEMU `json:"qemu"` @@ -355,9 +355,9 @@ func writeProps() error { Server: kola.ESXOptions.Server, BaseVMName: kola.ESXOptions.BaseVMName, }, - GCE: GCE{ - Image: kola.GCEOptions.Image, - MachineType: kola.GCEOptions.MachineType, + GCP: GCP{ + Image: kola.GCPOptions.Image, + MachineType: kola.GCPOptions.MachineType, }, OpenStack: OpenStack{ Region: kola.OpenStackOptions.Region, @@ -604,8 +604,8 @@ func syncFindParentImageOptions() error { if err != nil { return err } - case "gce": - kola.GCEOptions.Image, err = parentCosaBuild.FindGCPImage() + case "gcp": + kola.GCPOptions.Image, err = parentCosaBuild.FindGCPImage() if err != nil { return err } diff --git a/mantle/cmd/kola/options.go b/mantle/cmd/kola/options.go index a254a1cf93626bfd844d690f4c69b70082e89efb..012847da49ac2c68b5eebca82d1103ce0aa39892 100644 --- a/mantle/cmd/kola/options.go +++ b/mantle/cmd/kola/options.go @@ -1,3 +1,4 @@ +// Copyright 2023 Red Hat // Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,7 +40,7 @@ var ( kolaPlatform string kolaParallelArg string kolaArchitectures = []string{"amd64"} - kolaPlatforms = []string{"aws", "azure", "do", "esx", "gce", "openstack", "packet", "qemu", "qemu-iso"} + kolaPlatforms = []string{"aws", "azure", "do", "esx", "gcp", "openstack", "packet", "qemu", "qemu-iso"} kolaDistros = []string{"fcos", "rhcos", "scos", "nestos"} ) @@ -86,8 +87,7 @@ func init() { sv(&kola.AWSOptions.IAMInstanceProfile, "aws-iam-profile", "kola", "AWS IAM instance profile name") // azure-specific options - sv(&kola.AzureOptions.AzureProfile, "azure-profile", "", "Azure profile (default \"~/"+auth.AzureProfilePath+"\")") - sv(&kola.AzureOptions.AzureAuthLocation, "azure-auth", "", "Azure auth location (default \"~/"+auth.AzureAuthPath+"\")") + sv(&kola.AzureOptions.AzureCredentials, "azure-credentials", "", "Azure credentials file location (default \"~/"+auth.AzureCredentialsPath+"\")") sv(&kola.AzureOptions.DiskURI, "azure-disk-uri", "", "Azure disk uri (custom images)") sv(&kola.AzureOptions.Publisher, "azure-publisher", "CoreOS", "Azure image publisher (default \"CoreOS\"") sv(&kola.AzureOptions.Offer, "azure-offer", "CoreOS", "Azure image offer (default \"CoreOS\"") @@ -110,16 +110,17 @@ func init() { sv(&kola.ESXOptions.Profile, "esx-profile", "", "ESX profile (default \"default\")") sv(&kola.ESXOptions.BaseVMName, "esx-base-vm", "", "ESX base VM name") - // gce-specific options - sv(&kola.GCEOptions.Image, "gce-image", "", "GCE image, full api endpoints names are accepted if resource is in a different project") - sv(&kola.GCEOptions.Project, "gce-project", "fedora-coreos-devel", "GCE project name") - sv(&kola.GCEOptions.Zone, "gce-zone", "us-central1-a", "GCE zone name") - sv(&kola.GCEOptions.MachineType, "gce-machinetype", "n1-standard-1", "GCE machine type") - sv(&kola.GCEOptions.DiskType, 
"gce-disktype", "pd-ssd", "GCE disk type") - sv(&kola.GCEOptions.Network, "gce-network", "default", "GCE network") - sv(&kola.GCEOptions.ServiceAcct, "gce-service-account", "", "GCE service account to attach to instance (default project default)") - bv(&kola.GCEOptions.ServiceAuth, "gce-service-auth", false, "for non-interactive auth when running within GCE") - sv(&kola.GCEOptions.JSONKeyFile, "gce-json-key", "", "use a service account's JSON key for authentication (default \"~/"+auth.GCEConfigPath+"\")") + // gcp-specific options + sv(&kola.GCPOptions.Image, "gcp-image", "", "GCP image, full api endpoints names are accepted if resource is in a different project") + sv(&kola.GCPOptions.Project, "gcp-project", "fedora-coreos-devel", "GCP project name") + sv(&kola.GCPOptions.Zone, "gcp-zone", "us-central1-a", "GCP zone name") + sv(&kola.GCPOptions.MachineType, "gcp-machinetype", "", "GCP machine type") + sv(&kola.GCPOptions.DiskType, "gcp-disktype", "pd-ssd", "GCP disk type") + sv(&kola.GCPOptions.Network, "gcp-network", "default", "GCP network") + sv(&kola.GCPOptions.ServiceAcct, "gcp-service-account", "", "GCP service account to attach to instance (default project default)") + bv(&kola.GCPOptions.ServiceAuth, "gcp-service-auth", false, "for non-interactive auth when running within GCP") + sv(&kola.GCPOptions.JSONKeyFile, "gcp-json-key", "", "use a service account's JSON key for authentication (default \"~/"+auth.GCPConfigPath+"\")") + bv(&kola.GCPOptions.Confidential, "gcp-confidential-vm", false, "create confidential instances") // openstack-specific options sv(&kola.OpenStackOptions.ConfigPath, "openstack-config-file", "", "Path to a clouds.yaml formatted OpenStack config file. The underlying library defaults to ./clouds.yaml") @@ -235,6 +236,23 @@ func syncOptionsImpl(useCosa bool) error { fmt.Printf("Using %s instance type\n", kola.AWSOptions.InstanceType) } + // Choose an appropriate GCP instance type for the target architecture + if kolaPlatform == "gcp" && kola.GCPOptions.MachineType == "" { + switch kola.Options.CosaBuildArch { + case "x86_64": + if kola.GCPOptions.Confidential { + // https://cloud.google.com/compute/confidential-vm/docs/locations + fmt.Print("Setting instance type for confidential computing") + kola.GCPOptions.MachineType = "n2d-standard-2" + } else { + kola.GCPOptions.MachineType = "n1-standard-1" + } + case "aarch64": + kola.GCPOptions.MachineType = "t2a-standard-1" + } + fmt.Printf("Using %s instance type\n", kola.GCPOptions.MachineType) + } + // if no external dirs were given, automatically add the working directory; // does nothing if ./tests/kola/ doesn't exist if len(runExternals) == 0 { @@ -361,15 +379,15 @@ func syncCosaOptions() error { } } } - case "gce": + case "gcp": // Pick up the GCP image from the build metadata - if kola.GCEOptions.Image == "" && kola.CosaBuild.Meta.Gcp != nil { - kola.GCEOptions.Image = + if kola.GCPOptions.Image == "" && kola.CosaBuild.Meta.Gcp != nil { + kola.GCPOptions.Image = fmt.Sprintf("projects/%s/global/images/%s", kola.CosaBuild.Meta.Gcp.ImageProject, kola.CosaBuild.Meta.Gcp.ImageName) - fmt.Printf("Using GCP image %s\n", kola.GCEOptions.Image) + fmt.Printf("Using GCP image %s\n", kola.GCPOptions.Image) } } diff --git a/mantle/kola/harness.go b/mantle/kola/harness.go index ad3dd8758ca38e12f8b4e584f3ed0122a9a2c227..dc71dd27fa6fa6aecf771b271902a3b24911f3b9 100644 --- a/mantle/kola/harness.go +++ b/mantle/kola/harness.go @@ -100,7 +100,7 @@ var ( AzureOptions = azureapi.Options{Options: &Options} // glue to set platform 
options from main DOOptions = doapi.Options{Options: &Options} // glue to set platform options from main ESXOptions = esxapi.Options{Options: &Options} // glue to set platform options from main - GCEOptions = gcloudapi.Options{Options: &Options} // glue to set platform options from main + GCPOptions = gcloudapi.Options{Options: &Options} // glue to set platform options from main OpenStackOptions = openstackapi.Options{Options: &Options} // glue to set platform options from main PacketOptions = packetapi.Options{Options: &Options} // glue to set platform options from main QEMUOptions = qemu.Options{Options: &Options} // glue to set platform options from main @@ -265,8 +265,8 @@ func NewFlight(pltfrm string) (flight platform.Flight, err error) { flight, err = do.NewFlight(&DOOptions) case "esx": flight, err = esx.NewFlight(&ESXOptions) - case "gce": - flight, err = gcloud.NewFlight(&GCEOptions) + case "gcp": + flight, err = gcloud.NewFlight(&GCPOptions) case "openstack": flight, err = openstack.NewFlight(&OpenStackOptions) case "packet": diff --git a/src/cmd-remote-prune b/src/cmd-remote-prune index d5c3e4cecd00fd9e39983d1734c4841012fec101..ab7f9cc0b09322d2d28450f4bcbb71b9f8556d91 100755 --- a/src/cmd-remote-prune +++ b/src/cmd-remote-prune @@ -15,10 +15,8 @@ parser = argparse.ArgumentParser(prog="coreos-assembler remote-prune") parser.add_argument("--workdir", default='.', help="Path to workdir") parser.add_argument("--dry-run", help="Don't actually delete anything", action='store_true') -parser.add_argument('--azure-auth', help='Path to Azure auth file', - default=os.environ.get("AZURE_AUTH")) -parser.add_argument('--azure-profile', help='Path to Azure profile', - default=os.environ.get('AZURE_PROFILE')) +parser.add_argument('--azure-credentials', help='Path to Azure credentials file', + default=os.environ.get("AZURE_CREDENTIALS")) parser.add_argument('--azure-resource-group', help='Resource group', default=os.environ.get('AZURE_RESOURCE_GROUP')) parser.add_argument("--gcp-json-key", help="GCP Service Account JSON Auth", @@ -59,8 +57,7 @@ if args.dry_run: cloud_config = { 'azure': { - 'auth': args.azure_auth, - 'profile': args.azure_profile, + 'credentials': args.azure_credentials, 'resource-group': args.azure_resource_group, }, 'gcp': { diff --git a/src/cosalib/aws.py b/src/cosalib/aws.py index 158fdd74d1fd1afed2eb1a1822774d9da74bf6f2..9c897a8579b4525a143b5bfc2215c30f295f2aee 100644 --- a/src/cosalib/aws.py +++ b/src/cosalib/aws.py @@ -1,12 +1,14 @@ import boto3 import json +import os import subprocess import sys from cosalib.cmdlib import ( - retry_stop, + flatten_image_yaml, retry_boto_exception, - retry_callback + retry_callback, + retry_stop ) from tenacity import ( retry, @@ -37,9 +39,18 @@ def aws_run_ore_replicate(build, args): if len(buildmeta.get('amis', [])) < 1: raise SystemExit(("buildmeta doesn't contain source AMIs." " Run buildextend-aws --upload first")) + + # Determine which region to copy from + if not args.source_region: + args.source_region = buildmeta['amis'][0]['name'] + + # If no region specified then autodetect the regions to replicate to. + # Specify --region=args.source_region here so ore knows to talk to + # a region that exists (i.e. it will talk to govcloud if copying + # from a govcloud region). 
if not args.region: args.region = subprocess.check_output([ - 'ore', 'aws', 'list-regions' + 'ore', 'aws', '--region', args.source_region, 'list-regions' ]).decode().strip().split() # only replicate to regions that don't already exist @@ -54,9 +65,6 @@ def aws_run_ore_replicate(build, args): print("no new regions detected") sys.exit(0) - if not args.source_region: - args.source_region = buildmeta['amis'][0]['name'] - source_image = None for a in buildmeta['amis']: if a['name'] == args.source_region: @@ -77,6 +85,7 @@ def aws_run_ore_replicate(build, args): ore_args.extend(region_list) print("+ {}".format(subprocess.list2cmdline(ore_args))) + ore_data = "" try: ore_data = subprocess.check_output(ore_args, encoding='utf-8') except subprocess.CalledProcessError as e: @@ -112,9 +121,23 @@ def aws_run_ore(build, args): if args.force: ore_args.extend(['--force']) + if args.credentials_file: + ore_args.extend(['--credentials-file', args.credentials_file]) + region = "us-east-1" if args.region is not None and len(args.region) > 0: region = args.region[0] + # Capture any input from image.yaml + image_yaml = flatten_image_yaml( + '/usr/lib/coreos-assembler/image-default.yaml', + flatten_image_yaml('src/config/image.yaml') + ) + if 'aws-imdsv2-only' in image_yaml and image_yaml['aws-imdsv2-only']: + ore_args.extend(['--imdsv2-only']) + if 'aws-volume-type' in image_yaml: + ore_args.extend(['--volume-type', image_yaml['aws-volume-type']]) + if 'aws-x86-boot-mode' in image_yaml: + ore_args.extend(['--x86-boot-mode', image_yaml['aws-x86-boot-mode']]) ore_args.extend([ '--region', f"{region}", @@ -131,6 +154,8 @@ def aws_run_ore(build, args): ore_args.extend(['--grant-user', user]) for user in args.grant_user_snapshot: ore_args.extend(['--grant-user-snapshot', user]) + if args.public: + ore_args.extend(['--public']) print("+ {}".format(subprocess.list2cmdline(ore_args))) ore_data = json.loads(subprocess.check_output(ore_args)) @@ -158,9 +183,12 @@ def aws_run_ore(build, args): def aws_cli(parser): parser.add_argument("--bucket", help="S3 Bucket") + parser.add_argument("--credentials-file", help="AWS config file", + default=os.environ.get("AWS_CONFIG_FILE")) parser.add_argument("--name-suffix", help="Suffix for name") parser.add_argument("--grant-user", help="Grant user launch permission", nargs="*", default=[]) parser.add_argument("--grant-user-snapshot", help="Grant user snapshot volume permission", nargs="*", default=[]) + parser.add_argument("--public", action="store_true", help="Mark images as publicly available") return parser diff --git a/src/cosalib/azure.py b/src/cosalib/azure.py index 24e073305988e6545bf91ace0b7e5d198f0d8c8e..b3ba14b23e217b350db7e2e768bf9bec59365983 100644 --- a/src/cosalib/azure.py +++ b/src/cosalib/azure.py @@ -8,14 +8,13 @@ from tenacity import ( @retry(reraise=True, stop=stop_after_attempt(3)) -def remove_azure_image(image, resource_group, auth, profile): +def remove_azure_image(image, resource_group, credentials): print(f"Azure: removing image {image}") try: runcmd([ 'ore', 'azure', - '--azure-auth', auth, - '--azure-profile', profile, - 'delete-image-arm', + '--azure-credentials', credentials, + 'delete-image', '--image-name', image, '--resource-group', resource_group ]) @@ -39,10 +38,9 @@ def azure_run_ore(build, args): ore_args = [ 'ore', '--log-level', args.log_level, - 'azure', 'upload-blob-arm', - '--azure-auth', args.auth, + 'azure', 'upload-blob', + '--azure-credentials', args.credentials, '--azure-location', args.location, - '--azure-profile', args.profile, 
'--blob-name', azure_vhd_name, '--file', f"{build.image_path}", '--container', args.container, @@ -77,9 +75,9 @@ def azure_cli(parser): Common Azure CLI """ parser.add_argument( - '--auth', - help='Path to Azure auth file', - default=os.environ.get("AZURE_AUTH")) + '--credentials', + help='Path to Azure credentials file', + default=os.environ.get("AZURE_CREDENTIALS")) parser.add_argument( '--container', help='Storage location to write to', @@ -90,11 +88,6 @@ def azure_cli(parser): help='Azure location (default westus)', default=os.environ.get("AZURE_LOCATION", "westus") ) - parser.add_argument( - '--profile', - help='Path to Azure profile', - default=os.environ.get('AZURE_PROFILE') - ) parser.add_argument( '--resource-group', help='Resource group', diff --git a/src/cosalib/gcp.py b/src/cosalib/gcp.py index 038636fb351622b6c09e8122b0f3032e870fbf2f..39cdad419685fda79d1ad459408e522a5f0e7e8e 100644 --- a/src/cosalib/gcp.py +++ b/src/cosalib/gcp.py @@ -42,6 +42,8 @@ def gcp_run_ore(build, args): raise Exception(arg_exp_str.format("json-key", "GCP_JSON_AUTH")) if args.project is None: raise Exception(arg_exp_str.format("project", "GCP_PROJECT")) + if not args.create_image: + raise Exception("Invalid to call with --create-image=False") gcp_name = re.sub(r'[_\.]', '-', build.image_name_base) if not re.fullmatch(GCP_NAMING_RE, gcp_name): @@ -61,18 +63,18 @@ def gcp_run_ore(build, args): ore_upload_cmd = ore_common_args + [ 'upload', '--basename', build.build_name, + '--arch', build.basearch, '--force', # We want to support restarting the pipeline '--bucket', f'{args.bucket}', '--name', gcp_name, '--file', f"{build.image_path}", '--write-url', urltmp, + '--create-image=true', ] if args.description: ore_upload_cmd.extend(['--description', args.description]) if args.public: ore_upload_cmd.extend(['--public']) - if not args.create_image: - ore_upload_cmd.extend(['--create-image=false']) for license in args.license or DEFAULT_LICENSES: ore_upload_cmd.extend(['--license', license]) runcmd(ore_upload_cmd) @@ -128,12 +130,6 @@ def gcp_cli(parser): parser.add_argument("--bucket", help="Storage account to write image to", default=os.environ.get("GCP_BUCKET")) - parser.add_argument("--gce", - help="Use GCE as the platform ID instead of GCP", - action="store_true", - default=bool( - os.environ.get("GCP_GCE_PLATFORM_ID", False)) - ) parser.add_argument("--json-key", help="GCP Service Account JSON Auth", default=os.environ.get("GCP_JSON_AUTH")) @@ -146,6 +142,7 @@ def gcp_cli(parser): parser.add_argument("--description", help="The description that should be attached to the image", default=None) + # Remove --create-image after some time in which callers can be updated parser.add_argument("--create-image", type=boolean_string, help="Whether or not to create an image in GCP after upload.", diff --git a/src/cosalib/ibmcloud.py b/src/cosalib/ibmcloud.py index 1d6975b0bb353a35f34ccd037d0219b7ec7ccee6..7a6e3c560f77e1d53e19060ec840d73a855da576 100644 --- a/src/cosalib/ibmcloud.py +++ b/src/cosalib/ibmcloud.py @@ -37,7 +37,7 @@ VARIANTS = { "image_format": "raw", "image_suffix": "ova.gz", "platform": "powervs", - "gzip": True, + "compression": "gzip", "tar_members": [ "disk.raw" ] diff --git a/src/cosalib/prune.py b/src/cosalib/prune.py index 01613a87980ee4a4f5b58dce05f4b2db137879a3..eee6c338b3234e10e62934208701d809b22b0e77 100644 --- a/src/cosalib/prune.py +++ b/src/cosalib/prune.py @@ -2,12 +2,7 @@ import collections import json import os -from cosalib.s3 import ( - head_object, - list_objects, - download_file, - 
delete_object -) +from cosalib.s3 import S3 from cosalib.aws import ( deregister_ami, @@ -30,7 +25,7 @@ def get_unreferenced_s3_builds(active_build_set, bucket, prefix): :type active_build_set: list """ print(f"Looking for unreferenced builds in s3://{bucket}/{prefix}") - s3_subdirs = list_objects(bucket, f"{prefix}/", result_key='CommonPrefixes') + s3_subdirs = S3().list_objects(bucket, f"{prefix}/", result_key='CommonPrefixes') s3_matched = set() s3_unmatched = set() for prefixKey in s3_subdirs: @@ -58,10 +53,10 @@ def fetch_build_meta(builds, buildid, arch, bucket, prefix): os.makedirs(build_dir, exist_ok=True) s3_key = f"{prefix}/{buildid}/{arch}/meta.json" print(f"Fetching meta.json for '{buildid}' from s3://{bucket}/{prefix} to {meta_json_path}") - head_result = head_object(bucket, s3_key) + head_result = S3().head_object(bucket, s3_key) if head_result: print(f"Found s3 key at {s3_key}") - download_file(bucket, s3_key, meta_json_path) + S3().download_file(bucket, s3_key, meta_json_path) else: print(f"Failed to find object at {s3_key}") return None @@ -115,11 +110,10 @@ def delete_build(build, bucket, prefix, cloud_config, force=False): if azure: image = azure.get('image') resource_group = cloud_config.get('azure', {}).get('resource-group') - auth = cloud_config.get('azure', {}).get('auth') - profile = cloud_config.get('azure', {}).get('profile') - if image and resource_group and auth and profile: + credentials = cloud_config.get('azure', {}).get('credentials') + if image and resource_group and credentials: try: - remove_azure_image(image, resource_group, auth, profile) + remove_azure_image(image, resource_group, credentials) except Exception as e: errors.append(e) @@ -143,4 +137,4 @@ def delete_build(build, bucket, prefix, cloud_config, force=False): # Delete s3 bucket print(f"Deleting key {prefix}{build.id} from bucket {bucket}") - delete_object(bucket, f"{prefix}{str(build.id)}") + S3().delete_object(bucket, f"{prefix}{str(build.id)}") diff --git a/src/cosalib/s3.py b/src/cosalib/s3.py index ff4206b50743894b41d371c736108a9082456f30..356e30121b552497e0115f27b3df3c165e4e8335 100644 --- a/src/cosalib/s3.py +++ b/src/cosalib/s3.py @@ -9,51 +9,46 @@ from cosalib.cmdlib import ( from tenacity import retry -S3 = boto3.client('s3') -S3CONFIG = boto3.s3.transfer.TransferConfig( - num_download_attempts=5 -) - - -def download_file(bucket, key, dest): - S3.download_file(bucket, key, dest, Config=S3CONFIG) - - -@retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback) -def head_bucket(bucket): - S3.head_bucket(Bucket=bucket) - - -@retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback) -def head_object(bucket, key): - try: - S3.head_object(Bucket=bucket, Key=key) - except ClientError as e: - if e.response['Error']['Code'] == '404': - return False - raise e - return True - - -@retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback) -def list_objects(bucket, prefix, delimiter="/", result_key='Contents'): - kwargs = { - 'Bucket': bucket, - 'Delimiter': delimiter, - 'Prefix': prefix, - } - isTruncated = True - while isTruncated: - batch = S3.list_objects_v2(**kwargs) - yield from batch.get(result_key) or [] - kwargs['ContinuationToken'] = batch.get('NextContinuationToken') - isTruncated = batch['IsTruncated'] - - -@retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback) -def delete_object(bucket, key): - sub_objects = list(list_objects(bucket, key)) - if sub_objects != []: - print("S3: deleting 
{sub_objects}")
-        S3.delete_objects(Bucket=bucket, Delete=sub_objects)
-    S3.delete_object(Bucket=bucket, Key=key)
+class S3(object):
+    def __init__(self):
+        self.client = boto3.client('s3')
+
+    def download_file(self, bucket, key, dest):
+        self.client.download_file(bucket, key, dest,
+                                   Config=boto3.s3.transfer.TransferConfig(num_download_attempts=5))
+
+    @retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback)
+    def head_bucket(self, bucket):
+        self.client.head_bucket(Bucket=bucket)
+
+    @retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback)
+    def head_object(self, bucket, key):
+        try:
+            self.client.head_object(Bucket=bucket, Key=key)
+        except ClientError as e:
+            if e.response['Error']['Code'] == '404':
+                return False
+            raise e
+        return True
+
+    @retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback)
+    def list_objects(self, bucket, prefix, delimiter="/", result_key='Contents'):
+        kwargs = {
+            'Bucket': bucket,
+            'Delimiter': delimiter,
+            'Prefix': prefix,
+        }
+        isTruncated = True
+        while isTruncated:
+            batch = self.client.list_objects_v2(**kwargs)
+            yield from batch.get(result_key) or []
+            kwargs['ContinuationToken'] = batch.get('NextContinuationToken')
+            isTruncated = batch['IsTruncated']
+
+    @retry(stop=retry_stop, retry=retry_boto_exception, before_sleep=retry_callback)
+    def delete_object(self, bucket, key):
+        sub_objects = list(self.list_objects(bucket, key))
+        if sub_objects != []:
+            print(f"S3: deleting {sub_objects}")
+            self.client.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': o['Key']} for o in sub_objects]})
+        self.client.delete_object(Bucket=bucket, Key=key)
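
Usage note for the cosalib/s3.py refactor above: the former module-level helpers are now methods on an S3 class that owns its boto3 client, so callers construct an instance first. A minimal migration sketch follows; it is not part of the patch, and the bucket and key names are purely illustrative:

    from cosalib.s3 import S3

    s3 = S3()

    # head_object() still returns True/False instead of raising on a
    # missing key, so existence checks read as before, just on an instance.
    if s3.head_object("my-bucket", "builds/builds.json"):
        s3.download_file("my-bucket", "builds/builds.json", "/tmp/builds.json")

    # list_objects() remains a generator over list_objects_v2 pages;
    # iterate it directly or wrap it in list().
    for obj in s3.list_objects("my-bucket", "builds/"):
        print(obj["Key"])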